Diffstat (limited to 'fs/iomap/buffered-io.c')
-rw-r--r-- | fs/iomap/buffered-io.c | 1440 |
1 file changed, 669 insertions, 771 deletions
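Before the diff itself, one note on the central data-structure change it makes: struct iomap_page's fixed DECLARE_BITMAP(uptodate, PAGE_SIZE / 512) becomes a flexible array, allocated with struct_size() and BITS_TO_LONGS() for exactly i_blocks_per_folio() bits, and the folio is marked uptodate once that bitmap is full. The sketch below is a minimal, self-contained userspace illustration of that allocation pattern only, not kernel code; names such as blk_state and blk_state_alloc are hypothetical.

/*
 * Userspace sketch of a per-folio block-state structure whose uptodate
 * bitmap is a flexible array sized to the number of blocks the folio
 * covers, mirroring the struct_size()/BITS_TO_LONGS() pattern in the patch.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct blk_state {
	unsigned int nr_blocks;		/* blocks covered by this folio */
	unsigned long uptodate[];	/* one bit per block */
};

static struct blk_state *blk_state_alloc(size_t folio_size, size_t block_size)
{
	unsigned int nr_blocks = folio_size / block_size;
	struct blk_state *bs;

	/* header plus just enough bitmap words for nr_blocks bits */
	bs = calloc(1, sizeof(*bs) +
			BITS_TO_LONGS(nr_blocks) * sizeof(unsigned long));
	if (bs)
		bs->nr_blocks = nr_blocks;
	return bs;
}

static void set_range_uptodate(struct blk_state *bs, unsigned int first,
			       unsigned int nr)
{
	for (unsigned int i = first; i < first + nr; i++)
		bs->uptodate[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static int all_uptodate(const struct blk_state *bs)
{
	for (unsigned int i = 0; i < bs->nr_blocks; i++)
		if (!(bs->uptodate[i / BITS_PER_LONG] &
		      (1UL << (i % BITS_PER_LONG))))
			return 0;
	return 1;
}

int main(void)
{
	/* e.g. a 64KiB folio of 4KiB blocks -> 16 bits of per-block state */
	struct blk_state *bs = blk_state_alloc(64 * 1024, 4096);

	if (!bs)
		return 1;
	set_range_uptodate(bs, 0, 8);
	printf("uptodate after 8/16 blocks?  %d\n", all_uptodate(bs));
	set_range_uptodate(bs, 8, 8);
	printf("uptodate after 16/16 blocks? %d\n", all_uptodate(bs));
	free(bs);
	return 0;
}

The kernel version additionally keeps read_bytes_pending/write_bytes_pending counters and takes uptodate_lock around bitmap updates, which the sketch omits.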
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 7c84c4c027c4..91ee0b308e13 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -21,83 +21,87 @@ #include "../internal.h" +#define IOEND_BATCH_SIZE 4096 + /* - * Structure allocated for each page when block size < PAGE_SIZE to track - * sub-page uptodate status and I/O completions. + * Structure allocated for each folio when block size < folio size + * to track sub-folio uptodate status and I/O completions. */ struct iomap_page { - atomic_t read_count; - atomic_t write_count; + atomic_t read_bytes_pending; + atomic_t write_bytes_pending; spinlock_t uptodate_lock; - DECLARE_BITMAP(uptodate, PAGE_SIZE / 512); + unsigned long uptodate[]; }; -static inline struct iomap_page *to_iomap_page(struct page *page) +static inline struct iomap_page *to_iomap_page(struct folio *folio) { - if (page_has_private(page)) - return (struct iomap_page *)page_private(page); + if (folio_test_private(folio)) + return folio_get_private(folio); return NULL; } static struct bio_set iomap_ioend_bioset; static struct iomap_page * -iomap_page_create(struct inode *inode, struct page *page) +iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags) { - struct iomap_page *iop = to_iomap_page(page); + struct iomap_page *iop = to_iomap_page(folio); + unsigned int nr_blocks = i_blocks_per_folio(inode, folio); + gfp_t gfp; - if (iop || i_blocksize(inode) == PAGE_SIZE) + if (iop || nr_blocks <= 1) return iop; - iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL); - atomic_set(&iop->read_count, 0); - atomic_set(&iop->write_count, 0); - spin_lock_init(&iop->uptodate_lock); - bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); + if (flags & IOMAP_NOWAIT) + gfp = GFP_NOWAIT; + else + gfp = GFP_NOFS | __GFP_NOFAIL; - /* - * migrate_page_move_mapping() assumes that pages with private data have - * their count elevated by 1. - */ - get_page(page); - set_page_private(page, (unsigned long)iop); - SetPagePrivate(page); + iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)), + gfp); + if (iop) { + spin_lock_init(&iop->uptodate_lock); + if (folio_test_uptodate(folio)) + bitmap_fill(iop->uptodate, nr_blocks); + folio_attach_private(folio, iop); + } return iop; } -static void -iomap_page_release(struct page *page) +static void iomap_page_release(struct folio *folio) { - struct iomap_page *iop = to_iomap_page(page); + struct iomap_page *iop = folio_detach_private(folio); + struct inode *inode = folio->mapping->host; + unsigned int nr_blocks = i_blocks_per_folio(inode, folio); if (!iop) return; - WARN_ON_ONCE(atomic_read(&iop->read_count)); - WARN_ON_ONCE(atomic_read(&iop->write_count)); - ClearPagePrivate(page); - set_page_private(page, 0); - put_page(page); + WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending)); + WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending)); + WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) != + folio_test_uptodate(folio)); kfree(iop); } /* - * Calculate the range inside the page that we actually need to read. + * Calculate the range inside the folio that we actually need to read. 
*/ -static void -iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, - loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) +static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, + loff_t *pos, loff_t length, size_t *offp, size_t *lenp) { + struct iomap_page *iop = to_iomap_page(folio); loff_t orig_pos = *pos; loff_t isize = i_size_read(inode); unsigned block_bits = inode->i_blkbits; unsigned block_size = (1 << block_bits); - unsigned poff = offset_in_page(*pos); - unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); + size_t poff = offset_in_folio(folio, *pos); + size_t plen = min_t(loff_t, folio_size(folio) - poff, length); unsigned first = poff >> block_bits; unsigned last = (poff + plen - 1) >> block_bits; /* - * If the block size is smaller than the page size we need to check the + * If the block size is smaller than the page size, we need to check the * per-block uptodate status and adjust the offset and length if needed * to avoid reading in already uptodate ranges. */ @@ -125,12 +129,12 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, } /* - * If the extent spans the block that contains the i_size we need to + * If the extent spans the block that contains the i_size, we need to * handle both halves separately so that we properly zero data in the * page cache for blocks that are entirely outside of i_size. */ if (orig_pos <= isize && orig_pos + length > isize) { - unsigned end = offset_in_page(isize - 1) >> block_bits; + unsigned end = offset_in_folio(folio, isize - 1) >> block_bits; if (first <= end && last > end) plen -= (last - end) * block_size; @@ -140,432 +144,358 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, *lenp = plen; } -static void -iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len) +static void iomap_iop_set_range_uptodate(struct folio *folio, + struct iomap_page *iop, size_t off, size_t len) { - struct iomap_page *iop = to_iomap_page(page); - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; unsigned first = off >> inode->i_blkbits; unsigned last = (off + len - 1) >> inode->i_blkbits; - bool uptodate = true; unsigned long flags; - unsigned int i; spin_lock_irqsave(&iop->uptodate_lock, flags); - for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) { - if (i >= first && i <= last) - set_bit(i, iop->uptodate); - else if (!test_bit(i, iop->uptodate)) - uptodate = false; - } - - if (uptodate) - SetPageUptodate(page); + bitmap_set(iop->uptodate, first, last - first + 1); + if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio))) + folio_mark_uptodate(folio); spin_unlock_irqrestore(&iop->uptodate_lock, flags); } -static void -iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len) +static void iomap_set_range_uptodate(struct folio *folio, + struct iomap_page *iop, size_t off, size_t len) { - if (PageError(page)) - return; - - if (page_has_private(page)) - iomap_iop_set_range_uptodate(page, off, len); + if (iop) + iomap_iop_set_range_uptodate(folio, iop, off, len); else - SetPageUptodate(page); + folio_mark_uptodate(folio); } -static void -iomap_read_finish(struct iomap_page *iop, struct page *page) +static void iomap_finish_folio_read(struct folio *folio, size_t offset, + size_t len, int error) { - if (!iop || atomic_dec_and_test(&iop->read_count)) - unlock_page(page); -} - -static void -iomap_read_page_end_io(struct bio_vec *bvec, int error) -{ - struct page *page = bvec->bv_page; - 
struct iomap_page *iop = to_iomap_page(page); + struct iomap_page *iop = to_iomap_page(folio); if (unlikely(error)) { - ClearPageUptodate(page); - SetPageError(page); + folio_clear_uptodate(folio); + folio_set_error(folio); } else { - iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len); + iomap_set_range_uptodate(folio, iop, offset, len); } - iomap_read_finish(iop, page); + if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending)) + folio_unlock(folio); } -static void -iomap_read_end_io(struct bio *bio) +static void iomap_read_end_io(struct bio *bio) { int error = blk_status_to_errno(bio->bi_status); - struct bio_vec *bvec; - struct bvec_iter_all iter_all; + struct folio_iter fi; - bio_for_each_segment_all(bvec, bio, iter_all) - iomap_read_page_end_io(bvec, error); + bio_for_each_folio_all(fi, bio) + iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error); bio_put(bio); } struct iomap_readpage_ctx { - struct page *cur_page; - bool cur_page_in_bio; - bool is_readahead; + struct folio *cur_folio; + bool cur_folio_in_bio; struct bio *bio; - struct list_head *pages; + struct readahead_control *rac; }; -static void -iomap_read_inline_data(struct inode *inode, struct page *page, - struct iomap *iomap) +/** + * iomap_read_inline_data - copy inline data into the page cache + * @iter: iteration structure + * @folio: folio to copy to + * + * Copy the inline data in @iter into @folio and zero out the rest of the folio. + * Only a single IOMAP_INLINE extent is allowed at the end of each file. + * Returns zero for success to complete the read, or the usual negative errno. + */ +static int iomap_read_inline_data(const struct iomap_iter *iter, + struct folio *folio) { - size_t size = i_size_read(inode); + struct iomap_page *iop; + const struct iomap *iomap = iomap_iter_srcmap(iter); + size_t size = i_size_read(iter->inode) - iomap->offset; + size_t poff = offset_in_page(iomap->offset); + size_t offset = offset_in_folio(folio, iomap->offset); void *addr; - if (PageUptodate(page)) - return; + if (folio_test_uptodate(folio)) + return 0; - BUG_ON(page->index); - BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data)); + if (WARN_ON_ONCE(size > PAGE_SIZE - poff)) + return -EIO; + if (WARN_ON_ONCE(size > PAGE_SIZE - + offset_in_page(iomap->inline_data))) + return -EIO; + if (WARN_ON_ONCE(size > iomap->length)) + return -EIO; + if (offset > 0) + iop = iomap_page_create(iter->inode, folio, iter->flags); + else + iop = to_iomap_page(folio); - addr = kmap_atomic(page); + addr = kmap_local_folio(folio, offset); memcpy(addr, iomap->inline_data, size); - memset(addr + size, 0, PAGE_SIZE - size); - kunmap_atomic(addr); - SetPageUptodate(page); + memset(addr + size, 0, PAGE_SIZE - poff - size); + kunmap_local(addr); + iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff); + return 0; } -static inline bool iomap_block_needs_zeroing(struct inode *inode, - struct iomap *iomap, loff_t pos) +static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, + loff_t pos) { - return iomap->type != IOMAP_MAPPED || - (iomap->flags & IOMAP_F_NEW) || - pos >= i_size_read(inode); + const struct iomap *srcmap = iomap_iter_srcmap(iter); + + return srcmap->type != IOMAP_MAPPED || + (srcmap->flags & IOMAP_F_NEW) || + pos >= i_size_read(iter->inode); } -static loff_t -iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data, - struct iomap *iomap, struct iomap *srcmap) +static loff_t iomap_readpage_iter(const struct iomap_iter *iter, + struct iomap_readpage_ctx 
*ctx, loff_t offset) { - struct iomap_readpage_ctx *ctx = data; - struct page *page = ctx->cur_page; - struct iomap_page *iop = iomap_page_create(inode, page); - bool same_page = false, is_contig = false; + const struct iomap *iomap = &iter->iomap; + loff_t pos = iter->pos + offset; + loff_t length = iomap_length(iter) - offset; + struct folio *folio = ctx->cur_folio; + struct iomap_page *iop; loff_t orig_pos = pos; - unsigned poff, plen; + size_t poff, plen; sector_t sector; - if (iomap->type == IOMAP_INLINE) { - WARN_ON_ONCE(pos); - iomap_read_inline_data(inode, page, iomap); - return PAGE_SIZE; - } + if (iomap->type == IOMAP_INLINE) + return iomap_read_inline_data(iter, folio); /* zero post-eof blocks as the page may be mapped */ - iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen); + iop = iomap_page_create(iter->inode, folio, iter->flags); + iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); if (plen == 0) goto done; - if (iomap_block_needs_zeroing(inode, iomap, pos)) { - zero_user(page, poff, plen); - iomap_set_range_uptodate(page, poff, plen); - goto done; - } - - ctx->cur_page_in_bio = true; - - /* - * Try to merge into a previous segment if we can. - */ - sector = iomap_sector(iomap, pos); - if (ctx->bio && bio_end_sector(ctx->bio) == sector) - is_contig = true; - - if (is_contig && - __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) { - if (!same_page && iop) - atomic_inc(&iop->read_count); + if (iomap_block_needs_zeroing(iter, pos)) { + folio_zero_range(folio, poff, plen); + iomap_set_range_uptodate(folio, iop, poff, plen); goto done; } - /* - * If we start a new segment we need to increase the read count, and we - * need to do so before submitting any previous full bio to make sure - * that we don't prematurely unlock the page. - */ + ctx->cur_folio_in_bio = true; if (iop) - atomic_inc(&iop->read_count); + atomic_add(plen, &iop->read_bytes_pending); - if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) { - gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); - int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; + sector = iomap_sector(iomap, pos); + if (!ctx->bio || + bio_end_sector(ctx->bio) != sector || + !bio_add_folio(ctx->bio, folio, plen, poff)) { + gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); + gfp_t orig_gfp = gfp; + unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); if (ctx->bio) submit_bio(ctx->bio); - if (ctx->is_readahead) /* same as readahead_gfp_mask */ + if (ctx->rac) /* same as readahead_gfp_mask */ gfp |= __GFP_NORETRY | __GFP_NOWARN; - ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); - ctx->bio->bi_opf = REQ_OP_READ; - if (ctx->is_readahead) + ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), + REQ_OP_READ, gfp); + /* + * If the bio_alloc fails, try it again for a single page to + * avoid having to deal with partial page reads. This emulates + * what do_mpage_read_folio does. + */ + if (!ctx->bio) { + ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, + orig_gfp); + } + if (ctx->rac) ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_iter.bi_sector = sector; - bio_set_dev(ctx->bio, iomap->bdev); ctx->bio->bi_end_io = iomap_read_end_io; + bio_add_folio(ctx->bio, folio, plen, poff); } - bio_add_page(ctx->bio, page, plen, poff); done: /* * Move the caller beyond our range so that it keeps making progress. 
- * For that we have to include any leading non-uptodate ranges, but + * For that, we have to include any leading non-uptodate ranges, but * we can skip trailing ones as they will be handled in the next * iteration. */ return pos - orig_pos + plen; } -int -iomap_readpage(struct page *page, const struct iomap_ops *ops) +int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops) { - struct iomap_readpage_ctx ctx = { .cur_page = page }; - struct inode *inode = page->mapping->host; - unsigned poff; - loff_t ret; - - trace_iomap_readpage(page->mapping->host, 1); - - for (poff = 0; poff < PAGE_SIZE; poff += ret) { - ret = iomap_apply(inode, page_offset(page) + poff, - PAGE_SIZE - poff, 0, ops, &ctx, - iomap_readpage_actor); - if (ret <= 0) { - WARN_ON_ONCE(ret == 0); - SetPageError(page); - break; - } - } + struct iomap_iter iter = { + .inode = folio->mapping->host, + .pos = folio_pos(folio), + .len = folio_size(folio), + }; + struct iomap_readpage_ctx ctx = { + .cur_folio = folio, + }; + int ret; + + trace_iomap_readpage(iter.inode, 1); + + while ((ret = iomap_iter(&iter, ops)) > 0) + iter.processed = iomap_readpage_iter(&iter, &ctx, 0); + + if (ret < 0) + folio_set_error(folio); if (ctx.bio) { submit_bio(ctx.bio); - WARN_ON_ONCE(!ctx.cur_page_in_bio); + WARN_ON_ONCE(!ctx.cur_folio_in_bio); } else { - WARN_ON_ONCE(ctx.cur_page_in_bio); - unlock_page(page); + WARN_ON_ONCE(ctx.cur_folio_in_bio); + folio_unlock(folio); } /* - * Just like mpage_readpages and block_read_full_page we always - * return 0 and just mark the page as PageError on errors. This - * should be cleaned up all through the stack eventually. + * Just like mpage_readahead and block_read_full_folio, we always + * return 0 and just set the folio error flag on errors. This + * should be cleaned up throughout the stack eventually. */ return 0; } -EXPORT_SYMBOL_GPL(iomap_readpage); +EXPORT_SYMBOL_GPL(iomap_read_folio); -static struct page * -iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos, - loff_t length, loff_t *done) +static loff_t iomap_readahead_iter(const struct iomap_iter *iter, + struct iomap_readpage_ctx *ctx) { - while (!list_empty(pages)) { - struct page *page = lru_to_page(pages); - - if (page_offset(page) >= (u64)pos + length) - break; - - list_del(&page->lru); - if (!add_to_page_cache_lru(page, inode->i_mapping, page->index, - GFP_NOFS)) - return page; - - /* - * If we already have a page in the page cache at index we are - * done. Upper layers don't care if it is uptodate after the - * readpages call itself as every page gets checked again once - * actually needed. 
- */ - *done += PAGE_SIZE; - put_page(page); - } - - return NULL; -} - -static loff_t -iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length, - void *data, struct iomap *iomap, struct iomap *srcmap) -{ - struct iomap_readpage_ctx *ctx = data; + loff_t length = iomap_length(iter); loff_t done, ret; for (done = 0; done < length; done += ret) { - if (ctx->cur_page && offset_in_page(pos + done) == 0) { - if (!ctx->cur_page_in_bio) - unlock_page(ctx->cur_page); - put_page(ctx->cur_page); - ctx->cur_page = NULL; + if (ctx->cur_folio && + offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) { + if (!ctx->cur_folio_in_bio) + folio_unlock(ctx->cur_folio); + ctx->cur_folio = NULL; } - if (!ctx->cur_page) { - ctx->cur_page = iomap_next_page(inode, ctx->pages, - pos, length, &done); - if (!ctx->cur_page) - break; - ctx->cur_page_in_bio = false; + if (!ctx->cur_folio) { + ctx->cur_folio = readahead_folio(ctx->rac); + ctx->cur_folio_in_bio = false; } - ret = iomap_readpage_actor(inode, pos + done, length - done, - ctx, iomap, srcmap); + ret = iomap_readpage_iter(iter, ctx, done); + if (ret <= 0) + return ret; } return done; } -int -iomap_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages, const struct iomap_ops *ops) +/** + * iomap_readahead - Attempt to read pages from a file. + * @rac: Describes the pages to be read. + * @ops: The operations vector for the filesystem. + * + * This function is for filesystems to call to implement their readahead + * address_space operation. + * + * Context: The @ops callbacks may submit I/O (eg to read the addresses of + * blocks from disc), and may wait for it. The caller may be trying to + * access a different page, and so sleeping excessively should be avoided. + * It may allocate memory, but should avoid costly allocations. This + * function is called with memalloc_nofs set, so allocations will not cause + * the filesystem to be reentered. + */ +void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) { + struct iomap_iter iter = { + .inode = rac->mapping->host, + .pos = readahead_pos(rac), + .len = readahead_length(rac), + }; struct iomap_readpage_ctx ctx = { - .pages = pages, - .is_readahead = true, + .rac = rac, }; - loff_t pos = page_offset(list_entry(pages->prev, struct page, lru)); - loff_t last = page_offset(list_entry(pages->next, struct page, lru)); - loff_t length = last - pos + PAGE_SIZE, ret = 0; - trace_iomap_readpages(mapping->host, nr_pages); + trace_iomap_readahead(rac->mapping->host, readahead_count(rac)); + + while (iomap_iter(&iter, ops) > 0) + iter.processed = iomap_readahead_iter(&iter, &ctx); - while (length > 0) { - ret = iomap_apply(mapping->host, pos, length, 0, ops, - &ctx, iomap_readpages_actor); - if (ret <= 0) { - WARN_ON_ONCE(ret == 0); - goto done; - } - pos += ret; - length -= ret; - } - ret = 0; -done: if (ctx.bio) submit_bio(ctx.bio); - if (ctx.cur_page) { - if (!ctx.cur_page_in_bio) - unlock_page(ctx.cur_page); - put_page(ctx.cur_page); + if (ctx.cur_folio) { + if (!ctx.cur_folio_in_bio) + folio_unlock(ctx.cur_folio); } - - /* - * Check that we didn't lose a page due to the arcance calling - * conventions.. - */ - WARN_ON_ONCE(!ret && !list_empty(ctx.pages)); - return ret; } -EXPORT_SYMBOL_GPL(iomap_readpages); +EXPORT_SYMBOL_GPL(iomap_readahead); /* - * iomap_is_partially_uptodate checks whether blocks within a page are + * iomap_is_partially_uptodate checks whether blocks within a folio are * uptodate or not. 
* - * Returns true if all blocks which correspond to a file portion - * we want to read within the page are uptodate. + * Returns true if all blocks which correspond to the specified part + * of the folio are uptodate. */ -int -iomap_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count) +bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { - struct iomap_page *iop = to_iomap_page(page); - struct inode *inode = page->mapping->host; - unsigned len, first, last; - unsigned i; + struct iomap_page *iop = to_iomap_page(folio); + struct inode *inode = folio->mapping->host; + unsigned first, last, i; - /* Limit range to one page */ - len = min_t(unsigned, PAGE_SIZE - from, count); + if (!iop) + return false; - /* First and last blocks in range within page */ - first = from >> inode->i_blkbits; - last = (from + len - 1) >> inode->i_blkbits; + /* Caller's range may extend past the end of this folio */ + count = min(folio_size(folio) - from, count); - if (iop) { - for (i = first; i <= last; i++) - if (!test_bit(i, iop->uptodate)) - return 0; - return 1; - } + /* First and last blocks in range within folio */ + first = from >> inode->i_blkbits; + last = (from + count - 1) >> inode->i_blkbits; - return 0; + for (i = first; i <= last; i++) + if (!test_bit(i, iop->uptodate)) + return false; + return true; } EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); -int -iomap_releasepage(struct page *page, gfp_t gfp_mask) +bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags) { - trace_iomap_releasepage(page->mapping->host, page, 0, 0); + trace_iomap_release_folio(folio->mapping->host, folio_pos(folio), + folio_size(folio)); /* - * mm accommodates an old ext3 case where clean pages might not have had - * the dirty bit cleared. Thus, it can send actual dirty pages to - * ->releasepage() via shrink_active_list(), skip those here. + * mm accommodates an old ext3 case where clean folios might + * not have had the dirty bit cleared. Thus, it can send actual + * dirty folios to ->release_folio() via shrink_active_list(); + * skip those here. */ - if (PageDirty(page) || PageWriteback(page)) - return 0; - iomap_page_release(page); - return 1; + if (folio_test_dirty(folio) || folio_test_writeback(folio)) + return false; + iomap_page_release(folio); + return true; } -EXPORT_SYMBOL_GPL(iomap_releasepage); +EXPORT_SYMBOL_GPL(iomap_release_folio); -void -iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len) +void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) { - trace_iomap_invalidatepage(page->mapping->host, page, offset, len); + trace_iomap_invalidate_folio(folio->mapping->host, + folio_pos(folio) + offset, len); /* - * If we are invalidating the entire page, clear the dirty state from it - * and release it to avoid unnecessary buildup of the LRU. + * If we're invalidating the entire folio, clear the dirty state + * from it and release it to avoid unnecessary buildup of the LRU. 
*/ - if (offset == 0 && len == PAGE_SIZE) { - WARN_ON_ONCE(PageWriteback(page)); - cancel_dirty_page(page); - iomap_page_release(page); - } -} -EXPORT_SYMBOL_GPL(iomap_invalidatepage); - -#ifdef CONFIG_MIGRATION -int -iomap_migrate_page(struct address_space *mapping, struct page *newpage, - struct page *page, enum migrate_mode mode) -{ - int ret; - - ret = migrate_page_move_mapping(mapping, newpage, page, 0); - if (ret != MIGRATEPAGE_SUCCESS) - return ret; - - if (page_has_private(page)) { - ClearPagePrivate(page); - get_page(newpage); - set_page_private(newpage, page_private(page)); - set_page_private(page, 0); - put_page(page); - SetPagePrivate(newpage); + if (offset == 0 && len == folio_size(folio)) { + WARN_ON_ONCE(folio_test_writeback(folio)); + folio_cancel_dirty(folio); + iomap_page_release(folio); + } else if (folio_test_large(folio)) { + /* Must release the iop so the page can be split */ + WARN_ON_ONCE(!folio_test_uptodate(folio) && + folio_test_dirty(folio)); + iomap_page_release(folio); } - - if (mode != MIGRATE_SYNC_NO_COPY) - migrate_page_copy(newpage, page); - else - migrate_page_states(newpage, page); - return MIGRATEPAGE_SUCCESS; } -EXPORT_SYMBOL_GPL(iomap_migrate_page); -#endif /* CONFIG_MIGRATION */ - -enum { - IOMAP_WRITE_F_UNSHARE = (1 << 0), -}; +EXPORT_SYMBOL_GPL(iomap_invalidate_folio); static void iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) @@ -577,200 +507,202 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) * write started inside the existing inode size. */ if (pos + len > i_size) - truncate_pagecache_range(inode, max(pos, i_size), pos + len); + truncate_pagecache_range(inode, max(pos, i_size), + pos + len - 1); } -static int -iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff, - unsigned plen, struct iomap *iomap) +static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, + size_t poff, size_t plen, const struct iomap *iomap) { struct bio_vec bvec; struct bio bio; - bio_init(&bio, &bvec, 1); - bio.bi_opf = REQ_OP_READ; + bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ); bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); - bio_set_dev(&bio, iomap->bdev); - __bio_add_page(&bio, page, plen, poff); + bio_add_folio(&bio, folio, plen, poff); return submit_bio_wait(&bio); } -static int -__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags, - struct page *page, struct iomap *srcmap) +static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, + size_t len, struct folio *folio) { - struct iomap_page *iop = iomap_page_create(inode, page); - loff_t block_size = i_blocksize(inode); - loff_t block_start = pos & ~(block_size - 1); - loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1); - unsigned from = offset_in_page(pos), to = from + len, poff, plen; - int status; - - if (PageUptodate(page)) + const struct iomap *srcmap = iomap_iter_srcmap(iter); + struct iomap_page *iop; + loff_t block_size = i_blocksize(iter->inode); + loff_t block_start = round_down(pos, block_size); + loff_t block_end = round_up(pos + len, block_size); + unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio); + size_t from = offset_in_folio(folio, pos), to = from + len; + size_t poff, plen; + + if (folio_test_uptodate(folio)) return 0; + folio_clear_error(folio); + + iop = iomap_page_create(iter->inode, folio, iter->flags); + if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1) + return -EAGAIN; do { - iomap_adjust_read_range(inode, iop, 
&block_start, + iomap_adjust_read_range(iter->inode, folio, &block_start, block_end - block_start, &poff, &plen); if (plen == 0) break; - if (!(flags & IOMAP_WRITE_F_UNSHARE) && + if (!(iter->flags & IOMAP_UNSHARE) && (from <= poff || from >= poff + plen) && (to <= poff || to >= poff + plen)) continue; - if (iomap_block_needs_zeroing(inode, srcmap, block_start)) { - if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE)) + if (iomap_block_needs_zeroing(iter, block_start)) { + if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE)) return -EIO; - zero_user_segments(page, poff, from, to, poff + plen); - iomap_set_range_uptodate(page, poff, plen); - continue; - } + folio_zero_segments(folio, poff, from, to, poff + plen); + } else { + int status; - status = iomap_read_page_sync(block_start, page, poff, plen, - srcmap); - if (status) - return status; + if (iter->flags & IOMAP_NOWAIT) + return -EAGAIN; + + status = iomap_read_folio_sync(block_start, folio, + poff, plen, srcmap); + if (status) + return status; + } + iomap_set_range_uptodate(folio, iop, poff, plen); } while ((block_start += plen) < block_end); return 0; } -static int -iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags, - struct page **pagep, struct iomap *iomap, struct iomap *srcmap) +static int iomap_write_begin_inline(const struct iomap_iter *iter, + struct folio *folio) +{ + /* needs more work for the tailpacking case; disable for now */ + if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0)) + return -EIO; + return iomap_read_inline_data(iter, folio); +} + +static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos, + size_t len, struct folio **foliop) { - const struct iomap_page_ops *page_ops = iomap->page_ops; - struct page *page; + const struct iomap_page_ops *page_ops = iter->iomap.page_ops; + const struct iomap *srcmap = iomap_iter_srcmap(iter); + struct folio *folio; + unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS; int status = 0; - BUG_ON(pos + len > iomap->offset + iomap->length); - if (srcmap != iomap) + if (iter->flags & IOMAP_NOWAIT) + fgp |= FGP_NOWAIT; + + BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length); + if (srcmap != &iter->iomap) BUG_ON(pos + len > srcmap->offset + srcmap->length); if (fatal_signal_pending(current)) return -EINTR; + if (!mapping_large_folio_support(iter->inode->i_mapping)) + len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); + if (page_ops && page_ops->page_prepare) { - status = page_ops->page_prepare(inode, pos, len, iomap); + status = page_ops->page_prepare(iter->inode, pos, len); if (status) return status; } - page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT, - AOP_FLAG_NOFS); - if (!page) { - status = -ENOMEM; + folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT, + fgp, mapping_gfp_mask(iter->inode->i_mapping)); + if (!folio) { + status = (iter->flags & IOMAP_NOWAIT) ? 
-EAGAIN : -ENOMEM; goto out_no_page; } + if (pos + len > folio_pos(folio) + folio_size(folio)) + len = folio_pos(folio) + folio_size(folio) - pos; if (srcmap->type == IOMAP_INLINE) - iomap_read_inline_data(inode, page, srcmap); - else if (iomap->flags & IOMAP_F_BUFFER_HEAD) - status = __block_write_begin_int(page, pos, len, NULL, srcmap); + status = iomap_write_begin_inline(iter, folio); + else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) + status = __block_write_begin_int(folio, pos, len, NULL, srcmap); else - status = __iomap_write_begin(inode, pos, len, flags, page, - srcmap); + status = __iomap_write_begin(iter, pos, len, folio); if (unlikely(status)) goto out_unlock; - *pagep = page; + *foliop = folio; return 0; out_unlock: - unlock_page(page); - put_page(page); - iomap_write_failed(inode, pos, len); + folio_unlock(folio); + folio_put(folio); + iomap_write_failed(iter->inode, pos, len); out_no_page: if (page_ops && page_ops->page_done) - page_ops->page_done(inode, pos, 0, NULL, iomap); + page_ops->page_done(iter->inode, pos, 0, NULL); return status; } -int -iomap_set_page_dirty(struct page *page) -{ - struct address_space *mapping = page_mapping(page); - int newly_dirty; - - if (unlikely(!mapping)) - return !TestSetPageDirty(page); - - /* - * Lock out page->mem_cgroup migration to keep PageDirty - * synchronized with per-memcg dirty page counters. - */ - lock_page_memcg(page); - newly_dirty = !TestSetPageDirty(page); - if (newly_dirty) - __set_page_dirty(page, mapping, 0); - unlock_page_memcg(page); - - if (newly_dirty) - __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); - return newly_dirty; -} -EXPORT_SYMBOL_GPL(iomap_set_page_dirty); - -static int -__iomap_write_end(struct inode *inode, loff_t pos, unsigned len, - unsigned copied, struct page *page) +static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, + size_t copied, struct folio *folio) { - flush_dcache_page(page); + struct iomap_page *iop = to_iomap_page(folio); + flush_dcache_folio(folio); /* * The blocks that were entirely written will now be uptodate, so we - * don't have to worry about a readpage reading them and overwriting a - * partial write. However if we have encountered a short write and only + * don't have to worry about a read_folio reading them and overwriting a + * partial write. However, if we've encountered a short write and only * partially written into a block, it will not be marked uptodate, so a - * readpage might come in and destroy our partial write. + * read_folio might come in and destroy our partial write. * - * Do the simplest thing, and just treat any short write to a non - * uptodate page as a zero-length write, and force the caller to redo - * the whole thing. + * Do the simplest thing and just treat any short write to a + * non-uptodate page as a zero-length write, and force the caller to + * redo the whole thing. 
*/ - if (unlikely(copied < len && !PageUptodate(page))) + if (unlikely(copied < len && !folio_test_uptodate(folio))) return 0; - iomap_set_range_uptodate(page, offset_in_page(pos), len); - iomap_set_page_dirty(page); + iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len); + filemap_dirty_folio(inode->i_mapping, folio); return copied; } -static int -iomap_write_end_inline(struct inode *inode, struct page *page, - struct iomap *iomap, loff_t pos, unsigned copied) +static size_t iomap_write_end_inline(const struct iomap_iter *iter, + struct folio *folio, loff_t pos, size_t copied) { + const struct iomap *iomap = &iter->iomap; void *addr; - WARN_ON_ONCE(!PageUptodate(page)); - BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data)); + WARN_ON_ONCE(!folio_test_uptodate(folio)); + BUG_ON(!iomap_inline_data_valid(iomap)); - addr = kmap_atomic(page); - memcpy(iomap->inline_data + pos, addr + pos, copied); - kunmap_atomic(addr); + flush_dcache_folio(folio); + addr = kmap_local_folio(folio, pos); + memcpy(iomap_inline_data(iomap, pos), addr, copied); + kunmap_local(addr); - mark_inode_dirty(inode); + mark_inode_dirty(iter->inode); return copied; } -static int -iomap_write_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied, - struct page *page, struct iomap *iomap, struct iomap *srcmap) +/* Returns the number of bytes copied. May be 0. Cannot be an errno. */ +static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len, + size_t copied, struct folio *folio) { - const struct iomap_page_ops *page_ops = iomap->page_ops; - loff_t old_size = inode->i_size; - int ret; + const struct iomap_page_ops *page_ops = iter->iomap.page_ops; + const struct iomap *srcmap = iomap_iter_srcmap(iter); + loff_t old_size = iter->inode->i_size; + size_t ret; if (srcmap->type == IOMAP_INLINE) { - ret = iomap_write_end_inline(inode, page, iomap, pos, copied); + ret = iomap_write_end_inline(iter, folio, pos, copied); } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { - ret = block_write_end(NULL, inode->i_mapping, pos, len, copied, - page, NULL); + ret = block_write_end(NULL, iter->inode->i_mapping, pos, len, + copied, &folio->page, NULL); } else { - ret = __iomap_write_end(inode, pos, len, copied, page); + ret = __iomap_write_end(iter->inode, pos, len, copied, folio); } /* @@ -779,31 +711,33 @@ iomap_write_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied, * preferably after I/O completion so that no stale data is exposed. 
*/ if (pos + ret > old_size) { - i_size_write(inode, pos + ret); - iomap->flags |= IOMAP_F_SIZE_CHANGED; + i_size_write(iter->inode, pos + ret); + iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; } - unlock_page(page); + folio_unlock(folio); if (old_size < pos) - pagecache_isize_extended(inode, old_size, pos); + pagecache_isize_extended(iter->inode, old_size, pos); if (page_ops && page_ops->page_done) - page_ops->page_done(inode, pos, ret, page, iomap); - put_page(page); + page_ops->page_done(iter->inode, pos, ret, &folio->page); + folio_put(folio); if (ret < len) - iomap_write_failed(inode, pos, len); + iomap_write_failed(iter->inode, pos + ret, len - ret); return ret; } -static loff_t -iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, - struct iomap *iomap, struct iomap *srcmap) +static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) { - struct iov_iter *i = data; - long status = 0; + loff_t length = iomap_length(iter); + loff_t pos = iter->pos; ssize_t written = 0; + long status = 0; + struct address_space *mapping = iter->inode->i_mapping; + unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0; do { + struct folio *folio; struct page *page; unsigned long offset; /* Offset into pagecache page */ unsigned long bytes; /* Bytes to write to page */ @@ -813,94 +747,99 @@ iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data, bytes = min_t(unsigned long, PAGE_SIZE - offset, iov_iter_count(i)); again: + status = balance_dirty_pages_ratelimited_flags(mapping, + bdp_flags); + if (unlikely(status)) + break; + if (bytes > length) bytes = length; /* - * Bring in the user page that we will copy from _first_. + * Bring in the user page that we'll copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. * - * Not only is this an optimisation, but it is also required - * to check that the address is actually valid, when atomic - * usercopies are used, below. + * For async buffered writes the assumption is that the user + * page has already been faulted in. This can be optimized by + * faulting the user page. */ - if (unlikely(iov_iter_fault_in_readable(i, bytes))) { + if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { status = -EFAULT; break; } - status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, - srcmap); + status = iomap_write_begin(iter, pos, bytes, &folio); if (unlikely(status)) break; - if (mapping_writably_mapped(inode->i_mapping)) + page = folio_file_page(folio, pos >> PAGE_SHIFT); + if (mapping_writably_mapped(mapping)) flush_dcache_page(page); - copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); + copied = copy_page_from_iter_atomic(page, offset, bytes, i); - flush_dcache_page(page); + status = iomap_write_end(iter, pos, bytes, copied, folio); - status = iomap_write_end(inode, pos, bytes, copied, page, iomap, - srcmap); - if (unlikely(status < 0)) - break; - copied = status; + if (unlikely(copied != status)) + iov_iter_revert(i, copied - status); cond_resched(); - - iov_iter_advance(i, copied); - if (unlikely(copied == 0)) { + if (unlikely(status == 0)) { /* - * If we were unable to copy any data at all, we must - * fall back to a single segment length write. - * - * If we didn't fallback here, we could livelock - * because not all segments in the iov can be copied at - * once without a pagefault. + * A short copy made iomap_write_end() reject the + * thing entirely. 
Might be memory poisoning + * halfway through, might be a race with munmap, + * might be severe memory pressure. */ - bytes = min_t(unsigned long, PAGE_SIZE - offset, - iov_iter_single_seg_count(i)); + if (copied) + bytes = copied; goto again; } - pos += copied; - written += copied; - length -= copied; - - balance_dirty_pages_ratelimited(inode->i_mapping); + pos += status; + written += status; + length -= status; } while (iov_iter_count(i) && length); + if (status == -EAGAIN) { + iov_iter_revert(i, written); + return -EAGAIN; + } return written ? written : status; } ssize_t -iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter, +iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, const struct iomap_ops *ops) { - struct inode *inode = iocb->ki_filp->f_mapping->host; - loff_t pos = iocb->ki_pos, ret = 0, written = 0; + struct iomap_iter iter = { + .inode = iocb->ki_filp->f_mapping->host, + .pos = iocb->ki_pos, + .len = iov_iter_count(i), + .flags = IOMAP_WRITE, + }; + int ret; - while (iov_iter_count(iter)) { - ret = iomap_apply(inode, pos, iov_iter_count(iter), - IOMAP_WRITE, ops, iter, iomap_write_actor); - if (ret <= 0) - break; - pos += ret; - written += ret; - } + if (iocb->ki_flags & IOCB_NOWAIT) + iter.flags |= IOMAP_NOWAIT; - return written ? written : ret; + while ((ret = iomap_iter(&iter, ops)) > 0) + iter.processed = iomap_write_iter(&iter, i); + if (iter.pos == iocb->ki_pos) + return ret; + return iter.pos - iocb->ki_pos; } EXPORT_SYMBOL_GPL(iomap_file_buffered_write); -static loff_t -iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data, - struct iomap *iomap, struct iomap *srcmap) +static loff_t iomap_unshare_iter(struct iomap_iter *iter) { + struct iomap *iomap = &iter->iomap; + const struct iomap *srcmap = iomap_iter_srcmap(iter); + loff_t pos = iter->pos; + loff_t length = iomap_length(iter); long status = 0; - ssize_t written = 0; + loff_t written = 0; /* don't bother with blocks that are not shared to start with */ if (!(iomap->flags & IOMAP_F_SHARED)) @@ -912,20 +851,15 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data, do { unsigned long offset = offset_in_page(pos); unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length); - struct page *page; + struct folio *folio; - status = iomap_write_begin(inode, pos, bytes, - IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap); + status = iomap_write_begin(iter, pos, bytes, &folio); if (unlikely(status)) return status; - status = iomap_write_end(inode, pos, bytes, bytes, page, iomap, - srcmap); - if (unlikely(status <= 0)) { - if (WARN_ON_ONCE(status == 0)) - return -EIO; - return status; - } + status = iomap_write_end(iter, pos, bytes, bytes, folio); + if (WARN_ON_ONCE(status == 0)) + return -EIO; cond_resched(); @@ -933,7 +867,7 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data, written += status; length -= status; - balance_dirty_pages_ratelimited(inode->i_mapping); + balance_dirty_pages_ratelimited(iter->inode->i_mapping); } while (length); return written; @@ -943,77 +877,59 @@ int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops) { - loff_t ret; - - while (len) { - ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL, - iomap_unshare_actor); - if (ret <= 0) - return ret; - pos += ret; - len -= ret; - } + struct iomap_iter iter = { + .inode = inode, + .pos = pos, + .len = len, + .flags = IOMAP_WRITE | IOMAP_UNSHARE, + }; + int ret; - return 0; + while ((ret = 
iomap_iter(&iter, ops)) > 0) + iter.processed = iomap_unshare_iter(&iter); + return ret; } EXPORT_SYMBOL_GPL(iomap_file_unshare); -static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset, - unsigned bytes, struct iomap *iomap, struct iomap *srcmap) +static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) { - struct page *page; - int status; - - status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap); - if (status) - return status; - - zero_user(page, offset, bytes); - mark_page_accessed(page); - - return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap); -} - -static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes, - struct iomap *iomap) -{ - return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, - iomap_sector(iomap, pos & PAGE_MASK), offset, bytes); -} - -static loff_t -iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count, - void *data, struct iomap *iomap, struct iomap *srcmap) -{ - bool *did_zero = data; + const struct iomap *srcmap = iomap_iter_srcmap(iter); + loff_t pos = iter->pos; + loff_t length = iomap_length(iter); loff_t written = 0; - int status; /* already zeroed? we're done. */ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) - return count; + return length; do { - unsigned offset, bytes; - - offset = offset_in_page(pos); - bytes = min_t(loff_t, PAGE_SIZE - offset, count); + struct folio *folio; + int status; + size_t offset; + size_t bytes = min_t(u64, SIZE_MAX, length); - if (IS_DAX(inode)) - status = iomap_dax_zero(pos, offset, bytes, iomap); - else - status = iomap_zero(inode, pos, offset, bytes, iomap, - srcmap); - if (status < 0) + status = iomap_write_begin(iter, pos, bytes, &folio); + if (status) return status; + offset = offset_in_folio(folio, pos); + if (bytes > folio_size(folio) - offset) + bytes = folio_size(folio) - offset; + + folio_zero_range(folio, offset, bytes); + folio_mark_accessed(folio); + + bytes = iomap_write_end(iter, pos, bytes, bytes, folio); + if (WARN_ON_ONCE(bytes == 0)) + return -EIO; + pos += bytes; - count -= bytes; + length -= bytes; written += bytes; - if (did_zero) - *did_zero = true; - } while (count > 0); + } while (length > 0); + if (did_zero) + *did_zero = true; return written; } @@ -1021,19 +937,17 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops) { - loff_t ret; - - while (len > 0) { - ret = iomap_apply(inode, pos, len, IOMAP_ZERO, - ops, did_zero, iomap_zero_range_actor); - if (ret <= 0) - return ret; - - pos += ret; - len -= ret; - } + struct iomap_iter iter = { + .inode = inode, + .pos = pos, + .len = len, + .flags = IOMAP_ZERO, + }; + int ret; - return 0; + while ((ret = iomap_iter(&iter, ops)) > 0) + iter.processed = iomap_zero_iter(&iter, did_zero); + return ret; } EXPORT_SYMBOL_GPL(iomap_zero_range); @@ -1051,22 +965,21 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, } EXPORT_SYMBOL_GPL(iomap_truncate_page); -static loff_t -iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, - void *data, struct iomap *iomap, struct iomap *srcmap) +static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, + struct folio *folio) { - struct page *page = data; + loff_t length = iomap_length(iter); int ret; - if (iomap->flags & IOMAP_F_BUFFER_HEAD) { - ret = __block_write_begin_int(page, pos, length, NULL, iomap); + if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { + ret = __block_write_begin_int(folio, iter->pos, length, NULL, + 
&iter->iomap); if (ret) return ret; - block_commit_write(page, 0, length); + block_commit_write(&folio->page, 0, length); } else { - WARN_ON_ONCE(!PageUptodate(page)); - iomap_page_create(inode, page); - set_page_dirty(page); + WARN_ON_ONCE(!folio_test_uptodate(folio)); + folio_mark_dirty(folio); } return length; @@ -1074,53 +987,47 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length, vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) { - struct page *page = vmf->page; - struct inode *inode = file_inode(vmf->vma->vm_file); - unsigned long length; - loff_t offset; + struct iomap_iter iter = { + .inode = file_inode(vmf->vma->vm_file), + .flags = IOMAP_WRITE | IOMAP_FAULT, + }; + struct folio *folio = page_folio(vmf->page); ssize_t ret; - lock_page(page); - ret = page_mkwrite_check_truncate(page, inode); + folio_lock(folio); + ret = folio_mkwrite_check_truncate(folio, iter.inode); if (ret < 0) goto out_unlock; - length = ret; - - offset = page_offset(page); - while (length > 0) { - ret = iomap_apply(inode, offset, length, - IOMAP_WRITE | IOMAP_FAULT, ops, page, - iomap_page_mkwrite_actor); - if (unlikely(ret <= 0)) - goto out_unlock; - offset += ret; - length -= ret; - } + iter.pos = folio_pos(folio); + iter.len = ret; + while ((ret = iomap_iter(&iter, ops)) > 0) + iter.processed = iomap_folio_mkwrite_iter(&iter, folio); - wait_for_stable_page(page); + if (ret < 0) + goto out_unlock; + folio_wait_stable(folio); return VM_FAULT_LOCKED; out_unlock: - unlock_page(page); + folio_unlock(folio); return block_page_mkwrite_return(ret); } EXPORT_SYMBOL_GPL(iomap_page_mkwrite); -static void -iomap_finish_page_writeback(struct inode *inode, struct page *page, - int error) +static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, + size_t len, int error) { - struct iomap_page *iop = to_iomap_page(page); + struct iomap_page *iop = to_iomap_page(folio); if (error) { - SetPageError(page); - mapping_set_error(inode->i_mapping, -EIO); + folio_set_error(folio); + mapping_set_error(inode->i_mapping, error); } - WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop); - WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0); + WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop); + WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0); - if (!iop || atomic_dec_and_test(&iop->write_count)) - end_page_writeback(page); + if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending)) + folio_end_writeback(folio); } /* @@ -1128,7 +1035,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page, * state, release holds on bios, and finally free up memory. Do not use the * ioend after this. 
*/ -static void +static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error) { struct inode *inode = ioend->io_inode; @@ -1137,10 +1044,10 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) u64 start = bio->bi_iter.bi_sector; loff_t offset = ioend->io_offset; bool quiet = bio_flagged(bio, BIO_QUIET); + u32 folio_count = 0; for (bio = &ioend->io_inline_bio; bio; bio = next) { - struct bio_vec *bv; - struct bvec_iter_all iter_all; + struct folio_iter fi; /* * For the last bio, bi_private points to the ioend, so we @@ -1151,9 +1058,12 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) else next = bio->bi_private; - /* walk each page on bio, ending page IO on them */ - bio_for_each_segment_all(bv, bio, iter_all) - iomap_finish_page_writeback(inode, bv->bv_page, error); + /* walk all folios in bio, ending page IO on them */ + bio_for_each_folio_all(fi, bio) { + iomap_finish_folio_write(inode, fi.folio, fi.length, + error); + folio_count++; + } bio_put(bio); } /* The ioend has been freed by bio_put() */ @@ -1163,20 +1073,36 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) "%s: writeback error on inode %lu, offset %lld, sector %llu", inode->i_sb->s_id, inode->i_ino, offset, start); } + return folio_count; } +/* + * Ioend completion routine for merged bios. This can only be called from task + * contexts as merged ioends can be of unbound length. Hence we have to break up + * the writeback completions into manageable chunks to avoid long scheduler + * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get + * good batch processing throughput without creating adverse scheduler latency + * conditions. + */ void iomap_finish_ioends(struct iomap_ioend *ioend, int error) { struct list_head tmp; + u32 completions; + + might_sleep(); list_replace_init(&ioend->io_list, &tmp); - iomap_finish_ioend(ioend, error); + completions = iomap_finish_ioend(ioend, error); while (!list_empty(&tmp)) { + if (completions > IOEND_BATCH_SIZE * 8) { + cond_resched(); + completions = 0; + } ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); list_del_init(&ioend->io_list); - iomap_finish_ioend(ioend, error); + completions += iomap_finish_ioend(ioend, error); } } EXPORT_SYMBOL_GPL(iomap_finish_ioends); @@ -1197,13 +1123,23 @@ iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) return false; if (ioend->io_offset + ioend->io_size != next->io_offset) return false; + /* + * Do not merge physically discontiguous ioends. The filesystem + * completion functions will have to iterate the physical + * discontiguities even if we merge the ioends at a logical level, so + * we don't gain anything by merging physical discontiguities here. + * + * We cannot use bio->bi_iter.bi_sector here as it is modified during + * submission so does not point to the start sector of the bio at + * completion. 
+ */ + if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) + return false; return true; } void -iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends, - void (*merge_private)(struct iomap_ioend *ioend, - struct iomap_ioend *next)) +iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) { struct iomap_ioend *next; @@ -1215,14 +1151,13 @@ iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends, break; list_move_tail(&next->io_list, &ioend->io_list); ioend->io_size += next->io_size; - if (next->io_private && merge_private) - merge_private(ioend, next); } } EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); static int -iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b) +iomap_ioend_compare(void *priv, const struct list_head *a, + const struct list_head *b) { struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); @@ -1252,7 +1187,7 @@ static void iomap_writepage_end_bio(struct bio *bio) * Submit the final bio for an ioend. * * If @error is non-zero, it means that we have a situation where some part of - * the submission process has failed after we have marked paged for writeback + * the submission process has failed after we've marked pages for writeback * and unlocked them. In this situation, we need to fail the bio instead of * submitting it. This typically only happens on a filesystem shutdown. */ @@ -1267,7 +1202,7 @@ iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, error = wpc->ops->prepare_ioend(ioend, error); if (error) { /* - * If we are failing the IO now, just mark the ioend with an + * If we're failing the IO now, just mark the ioend with an * error and finish it. This will run IO completion immediately * as there is only one reference to the ioend at this point in * time. @@ -1288,11 +1223,10 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend; struct bio *bio; - bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset); - bio_set_dev(bio, wpc->iomap.bdev); + bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, + REQ_OP_WRITE | wbc_to_write_flags(wbc), + GFP_NOFS, &iomap_ioend_bioset); bio->bi_iter.bi_sector = sector; - bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); - bio->bi_write_hint = inode->i_write_hint; wbc_init_bio(wbc, bio); ioend = container_of(bio, struct iomap_ioend, io_inline_bio); @@ -1301,16 +1235,17 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, ioend->io_flags = wpc->iomap.flags; ioend->io_inode = inode; ioend->io_size = 0; + ioend->io_folios = 0; ioend->io_offset = offset; - ioend->io_private = NULL; ioend->io_bio = bio; + ioend->io_sector = sector; return ioend; } /* * Allocate a new bio, and chain the old bio to the new one. * - * Note that we have to do perform the chaining in this unintuitive order + * Note that we have to perform the chaining in this unintuitive order * so that the bi_private linkage is set up in the right direction for the * traversal in iomap_finish_ioend(). 
*/ @@ -1319,11 +1254,9 @@ iomap_chain_bio(struct bio *prev) { struct bio *new; - new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); - bio_copy_dev(new, prev);/* also copies over blkcg information */ + new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); + bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); - new->bi_opf = prev->bi_opf; - new->bi_write_hint = prev->bi_write_hint; bio_chain(prev, new); bio_get(prev); /* for iomap_finish_ioend */ @@ -1344,52 +1277,52 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, return false; if (sector != bio_end_sector(wpc->ioend->io_bio)) return false; + /* + * Limit ioend bio chain lengths to minimise IO completion latency. This + * also prevents long tight loops ending page writeback on all the + * folios in the ioend. + */ + if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) + return false; return true; } /* * Test to see if we have an existing ioend structure that we could append to - * first, otherwise finish off the current ioend and start another. + * first; otherwise finish off the current ioend and start another. */ static void -iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page, +iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio, struct iomap_page *iop, struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct list_head *iolist) { - sector_t sector = iomap_sector(&wpc->iomap, offset); + sector_t sector = iomap_sector(&wpc->iomap, pos); unsigned len = i_blocksize(inode); - unsigned poff = offset & (PAGE_SIZE - 1); - bool merged, same_page = false; + size_t poff = offset_in_folio(folio, pos); - if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) { + if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) { if (wpc->ioend) list_add(&wpc->ioend->io_list, iolist); - wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc); + wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc); } - merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, - &same_page); - if (iop && !same_page) - atomic_inc(&iop->write_count); - - if (!merged) { - if (bio_full(wpc->ioend->io_bio, len)) { - wpc->ioend->io_bio = - iomap_chain_bio(wpc->ioend->io_bio); - } - bio_add_page(wpc->ioend->io_bio, page, len, poff); + if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) { + wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); + bio_add_folio(wpc->ioend->io_bio, folio, len, poff); } + if (iop) + atomic_add(len, &iop->write_bytes_pending); wpc->ioend->io_size += len; - wbc_account_cgroup_owner(wbc, page, len); + wbc_account_cgroup_owner(wbc, &folio->page, len); } /* * We implement an immediate ioend submission policy here to avoid needing to * chain multiple ioends and hence nest mempool allocations which can violate - * forward progress guarantees we need to provide. The current ioend we are - * adding blocks to is cached on the writepage context, and if the new block - * does not append to the cached ioend it will create a new ioend and cache that + * the forward progress guarantees we need to provide. The current ioend we're + * adding blocks to is cached in the writepage context, and if the new block + * doesn't append to the cached ioend, it will create a new ioend and cache that * instead. 
 /*
  * We implement an immediate ioend submission policy here to avoid needing to
  * chain multiple ioends and hence nest mempool allocations which can violate
- * forward progress guarantees we need to provide. The current ioend we are
- * adding blocks to is cached on the writepage context, and if the new block
- * does not append to the cached ioend it will create a new ioend and cache that
+ * the forward progress guarantees we need to provide. The current ioend we're
+ * adding blocks to is cached in the writepage context, and if the new block
+ * doesn't append to the cached ioend, it will create a new ioend and cache that
  * instead.
  *
  * If a new ioend is created and cached, the old ioend is returned and queued
@@ -1403,44 +1336,46 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
 static int
 iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 		struct writeback_control *wbc, struct inode *inode,
-		struct page *page, u64 end_offset)
+		struct folio *folio, u64 end_pos)
 {
-	struct iomap_page *iop = to_iomap_page(page);
+	struct iomap_page *iop = iomap_page_create(inode, folio, 0);
 	struct iomap_ioend *ioend, *next;
 	unsigned len = i_blocksize(inode);
-	u64 file_offset; /* file offset of page */
+	unsigned nblocks = i_blocks_per_folio(inode, folio);
+	u64 pos = folio_pos(folio);
 	int error = 0, count = 0, i;
 	LIST_HEAD(submit_list);
 
-	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
-	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
+	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
 
 	/*
-	 * Walk through the page to find areas to write back. If we run off the
-	 * end of the current map or find the current map invalid, grab a new
-	 * one.
+	 * Walk through the folio to find areas to write back. If we
+	 * run off the end of the current map or find the current map
+	 * invalid, grab a new one.
	 */
-	for (i = 0, file_offset = page_offset(page);
-	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
-	     i++, file_offset += len) {
+	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
 		if (iop && !test_bit(i, iop->uptodate))
 			continue;
 
-		error = wpc->ops->map_blocks(wpc, inode, file_offset);
+		error = wpc->ops->map_blocks(wpc, inode, pos);
 		if (error)
 			break;
+		trace_iomap_writepage_map(inode, &wpc->iomap);
 		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
 			continue;
 		if (wpc->iomap.type == IOMAP_HOLE)
 			continue;
-		iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+		iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
 				&submit_list);
 		count++;
 	}
+	if (count)
+		wpc->ioend->io_folios++;
 
 	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
-	WARN_ON_ONCE(!PageLocked(page));
-	WARN_ON_ONCE(PageWriteback(page));
+	WARN_ON_ONCE(!folio_test_locked(folio));
+	WARN_ON_ONCE(folio_test_writeback(folio));
+	WARN_ON_ONCE(folio_test_dirty(folio));
 
 	/*
 	 * We cannot cancel the ioend directly here on error. We may have
@@ -1449,37 +1384,25 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * appropriately.
 	 */
 	if (unlikely(error)) {
+		/*
+		 * Let the filesystem know what portion of the current page
+		 * failed to map. If the page hasn't been added to ioend, it
+		 * won't be affected by I/O completion and we must unlock it
+		 * now.
+		 */
+		if (wpc->ops->discard_folio)
+			wpc->ops->discard_folio(folio, pos);
 		if (!count) {
-			/*
-			 * If the current page hasn't been added to ioend, it
-			 * won't be affected by I/O completions and we must
-			 * discard and unlock it right here.
-			 */
-			if (wpc->ops->discard_page)
-				wpc->ops->discard_page(page);
-			ClearPageUptodate(page);
-			unlock_page(page);
+			folio_unlock(folio);
 			goto done;
 		}
-
-		/*
-		 * If the page was not fully cleaned, we need to ensure that the
-		 * higher layers come back to it correctly. That means we need
-		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
-		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
-		 * so another attempt to write this page in this writeback sweep
-		 * will be made.
-		 */
-		set_page_writeback_keepwrite(page);
-	} else {
-		clear_page_dirty_for_io(page);
-		set_page_writeback(page);
 	}
-	unlock_page(page);
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 
 	/*
-	 * Preserve the original error if there was one, otherwise catch
+	 * Preserve the original error if there was one; catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
@@ -1497,32 +1420,31 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
 	if (!count)
-		end_page_writeback(page);
+		folio_end_writeback(folio);
 done:
-	mapping_set_error(page->mapping, error);
+	mapping_set_error(inode->i_mapping, error);
 	return error;
 }
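iomap_writepage_map() above walks the folio one filesystem block at a time, skipping blocks whose per-folio uptodate bit is clear and handing the rest to the ioend machinery. The stand-alone sketch below models just that walk; the bitmap helper and the block and folio sizes are illustrative assumptions, not kernel interfaces.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FOLIO_SIZE 16384u
#define DEMO_BLOCK_SIZE 4096u
#define DEMO_NBLOCKS    (DEMO_FOLIO_SIZE / DEMO_BLOCK_SIZE)

/* toy equivalent of test_bit() on a per-folio uptodate bitmap */
static bool demo_test_bit(const unsigned long *map, unsigned int bit)
{
        return (map[bit / (8 * sizeof(long))] >> (bit % (8 * sizeof(long)))) & 1;
}

int main(void)
{
        unsigned long uptodate[1] = { 0xb }; /* blocks 0, 1 and 3 uptodate; block 2 never read in */
        uint64_t folio_pos = 3 * DEMO_FOLIO_SIZE;      /* arbitrary file position */
        uint64_t end_pos = folio_pos + DEMO_FOLIO_SIZE;
        uint64_t pos = folio_pos;
        unsigned int count = 0;

        for (unsigned int i = 0; i < DEMO_NBLOCKS && pos < end_pos;
             i++, pos += DEMO_BLOCK_SIZE) {
                if (!demo_test_bit(uptodate, i))
                        continue;       /* never brought uptodate: nothing to write back */
                /* a real implementation would map the block and add it to an ioend here */
                printf("write back block %u at file offset %llu\n",
                       i, (unsigned long long)pos);
                count++;
        }
        printf("%u of %u blocks queued\n", count, DEMO_NBLOCKS);
        return 0;
}

Only the blocks that were ever brought uptodate can contain data the page cache is responsible for, which is why the walk can skip the others outright.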
 
 /*
  * Write out a dirty page.
  *
- * For delalloc space on the page we need to allocate space and flush it.
- * For unwritten space on the page we need to start the conversion to
+ * For delalloc space on the page, we need to allocate space and flush it.
+ * For unwritten space on the page, we need to start the conversion to
  * regular allocated space.
  */
 static int
 iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 {
+	struct folio *folio = page_folio(page);
 	struct iomap_writepage_ctx *wpc = data;
-	struct inode *inode = page->mapping->host;
-	pgoff_t end_index;
-	u64 end_offset;
-	loff_t offset;
+	struct inode *inode = folio->mapping->host;
+	u64 end_pos, isize;
 
-	trace_iomap_writepage(inode, page, 0, 0);
+	trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
 
 	/*
-	 * Refuse to write the page out if we are called from reclaim context.
+	 * Refuse to write the folio out if we're called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim. We explicitly
@@ -1536,17 +1458,10 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
 		goto redirty;
 
 	/*
-	 * Given that we do not allow direct reclaim to call us, we should
-	 * never be called in a recursive filesystem reclaim context.
-	 */
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
-		goto redirty;
-
-	/*
-	 * Is this page beyond the end of the file?
+	 * Is this folio beyond the end of the file?
	 *
-	 * The page index is less than the end_index, adjust the end_offset
-	 * to the highest offset that this page should represent.
+	 * The folio index is less than the end_index, adjust the end_pos
+	 * to the highest offset that this folio should represent.
	 * -----------------------------------------------------
	 * |            file mapping               | <EOF>     |
	 * -----------------------------------------------------
@@ -1555,11 +1470,9 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
-	offset = i_size_read(inode);
-	end_index = offset >> PAGE_SHIFT;
-	if (page->index < end_index)
-		end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
-	else {
+	isize = i_size_read(inode);
+	end_pos = folio_pos(folio) + folio_size(folio);
+	if (end_pos > isize) {
 		/*
 		 * Check whether the page to write out is beyond or straddles
 		 * i_size or not.
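The EOF handling that follows works in byte offsets: a folio whose end_pos lies past i_size is either entirely beyond EOF (skip it) or straddles EOF, in which case the tail past EOF is zeroed and the writeback range is clamped to i_size. A rough userspace model of that classification is sketched below; it collapses the index-based check into plain byte arithmetic and uses made-up names, so treat it as an illustration rather than the kernel logic.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_FOLIO_SIZE 16384u

/* Return the clamped end of the range to write back for one folio.
 * A result equal to folio_pos means the folio is fully beyond EOF and
 * there is nothing to write. */
static uint64_t demo_writeback_end(uint64_t folio_pos, uint64_t isize,
                                   unsigned char *data)
{
        uint64_t end_pos = folio_pos + DEMO_FOLIO_SIZE;

        if (folio_pos >= isize)
                return folio_pos;               /* fully beyond EOF: skip */
        if (end_pos > isize) {
                size_t poff = isize - folio_pos;/* where EOF falls inside the folio */

                memset(data + poff, 0, DEMO_FOLIO_SIZE - poff); /* zero the tail */
                end_pos = isize;                /* clamp the writeback range */
        }
        return end_pos;
}

int main(void)
{
        unsigned char folio[DEMO_FOLIO_SIZE];
        uint64_t isize = 3 * DEMO_FOLIO_SIZE + 100;     /* EOF 100 bytes into folio 3 */

        memset(folio, 0xaa, sizeof(folio));
        /* folio 3 straddles EOF: tail zeroed, end clamped to isize */
        printf("folio 3 end_pos = %llu\n",
               (unsigned long long)demo_writeback_end(3 * DEMO_FOLIO_SIZE, isize, folio));
        /* folio 4 is fully beyond EOF: empty range, nothing written */
        printf("folio 4 end_pos = %llu\n",
               (unsigned long long)demo_writeback_end(4 * DEMO_FOLIO_SIZE, isize, folio));
        return 0;
}

Comparing byte offsets directly is also what lets the folio version avoid the "end_index + 1" overflow discussed in the comment below.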
@@ -1571,28 +1484,29 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
		 * |                                |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
-		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+		size_t poff = offset_in_folio(folio, isize);
+		pgoff_t end_index = isize >> PAGE_SHIFT;
 
		/*
-		 * Skip the page if it is fully outside i_size, e.g. due to a
-		 * truncate operation that is in progress. We must redirty the
-		 * page so that reclaim stops reclaiming it. Otherwise
-		 * iomap_vm_releasepage() is called on it and gets confused.
+		 * Skip the page if it's fully outside i_size, e.g.
+		 * due to a truncate operation that's in progress. We've
+		 * cleaned this page and truncate will finish things off for
+		 * us.
		 *
-		 * Note that the end_index is unsigned long, it would overflow
-		 * if the given offset is greater than 16TB on 32-bit system
-		 * and if we do check the page is fully outside i_size or not
-		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
-		 * will be evaluated to 0. Hence this page will be redirtied
-		 * and be written out repeatedly which would result in an
-		 * infinite loop, the user program that perform this operation
-		 * will hang. Instead, we can verify this situation by checking
-		 * if the page to write is totally beyond the i_size or if it's
+		 * Note that the end_index is unsigned long. If the given
+		 * offset is greater than 16TB on a 32-bit system and we
+		 * checked whether the page is fully outside i_size with
+		 * "if (page->index >= end_index + 1)", then "end_index + 1"
+		 * would overflow and evaluate to 0. Hence this page would be
+		 * redirtied and written out repeatedly, which would result in
+		 * an infinite loop; the user program performing this operation
+		 * would hang. Instead, we can detect this situation by
+		 * checking if the page is totally beyond i_size or if its
		 * offset is just equal to the EOF.
		 */
-		if (page->index > end_index ||
-		    (page->index == end_index && offset_into_page == 0))
-			goto redirty;
+		if (folio->index > end_index ||
+		    (folio->index == end_index && poff == 0))
+			goto unlock;
 
		/*
		 * The page straddles i_size. It must be zeroed out on each
@@ -1602,36 +1516,20 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
-		zero_user_segment(page, offset_into_page, PAGE_SIZE);
-
-		/* Adjust the end_offset to the end of file */
-		end_offset = offset;
+		folio_zero_segment(folio, poff, folio_size(folio));
+		end_pos = isize;
	}
 
-	return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
+	return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
 
 redirty:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
+	folio_redirty_for_writepage(wbc, folio);
+unlock:
+	folio_unlock(folio);
	return 0;
 }
 
 int
-iomap_writepage(struct page *page, struct writeback_control *wbc,
-		struct iomap_writepage_ctx *wpc,
-		const struct iomap_writeback_ops *ops)
-{
-	int ret;
-
-	wpc->ops = ops;
-	ret = iomap_do_writepage(page, wbc, wpc);
-	if (!wpc->ioend)
-		return ret;
-	return iomap_submit_ioend(wpc, wpc->ioend, ret);
-}
-EXPORT_SYMBOL_GPL(iomap_writepage);
-
-int
 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)