Diffstat (limited to 'fs/iomap/buffered-io.c')
-rw-r--r--	fs/iomap/buffered-io.c	194
1 file changed, 82 insertions(+), 112 deletions(-)
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index bcfc288dba3f..8180061b9e16 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -22,18 +22,25 @@
#include "../internal.h"
/*
- * Structure allocated for each page when block size < PAGE_SIZE to track
- * sub-page uptodate status and I/O completions.
+ * Structure allocated for each page or THP when block size < page size
+ * to track sub-page uptodate status and I/O completions.
*/
struct iomap_page {
- atomic_t read_count;
- atomic_t write_count;
+ atomic_t read_bytes_pending;
+ atomic_t write_bytes_pending;
spinlock_t uptodate_lock;
- DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+ unsigned long uptodate[];
};
static inline struct iomap_page *to_iomap_page(struct page *page)
{
+ /*
+ * per-block data is stored in the head page. Callers should
+ * not be dealing with tail pages (and if they are, they can
+ * call thp_head() first).
+ */
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+
if (page_has_private(page))
return (struct iomap_page *)page_private(page);
return NULL;
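
Since per-block state is attached only to the head page, a caller that may hold a tail page is expected to normalize it before the lookup. A minimal illustrative sketch (thp_head() is the helper the comment above refers to; it is a no-op for order-0 pages):

	/* Illustrative caller pattern, not a call site from this patch. */
	struct iomap_page *iop = to_iomap_page(thp_head(page));
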
@@ -45,20 +52,16 @@ static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
struct iomap_page *iop = to_iomap_page(page);
+ unsigned int nr_blocks = i_blocks_per_page(inode, page);
- if (iop || i_blocksize(inode) == PAGE_SIZE)
+ if (iop || nr_blocks <= 1)
return iop;
- iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
- atomic_set(&iop->read_count, 0);
- atomic_set(&iop->write_count, 0);
+ iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
+ GFP_NOFS | __GFP_NOFAIL);
spin_lock_init(&iop->uptodate_lock);
- bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
-
- /*
- * migrate_page_move_mapping() assumes that pages with private data have
- * their count elevated by 1.
- */
+ if (PageUptodate(page))
+ bitmap_fill(iop->uptodate, nr_blocks);
attach_page_private(page, iop);
return iop;
}
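
The allocation now scales with the page: struct_size(iop, uptodate, n) computes sizeof(*iop) plus n elements of the flexible uptodate[] array, and BITS_TO_LONGS() converts a block count into array elements. A self-contained model of the arithmetic, with local stand-ins for the kernel macros:

	#include <stdio.h>

	/* Local stand-ins for the kernel's bitmap-sizing macros. */
	#define BITS_PER_LONG	  (8 * sizeof(unsigned long))
	#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	int main(void)
	{
		/* A 2MiB THP with 4KiB blocks has 512 blocks to track. */
		unsigned long nr_blocks = (2UL * 1024 * 1024) / 4096;
		size_t bitmap_bytes = BITS_TO_LONGS(nr_blocks) * sizeof(unsigned long);

		/* 512 bits -> 8 longs -> 64 bytes appended to struct iomap_page. */
		printf("%lu blocks need %zu bitmap bytes\n", nr_blocks, bitmap_bytes);
		return 0;
	}

Note also that a page which is already PageUptodate gets its bitmap pre-filled at creation, so the per-block state never lags the page-level flag.
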
@@ -67,11 +70,14 @@ static void
iomap_page_release(struct page *page)
{
struct iomap_page *iop = detach_page_private(page);
+ unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
if (!iop)
return;
- WARN_ON_ONCE(atomic_read(&iop->read_count));
- WARN_ON_ONCE(atomic_read(&iop->write_count));
+ WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
+ WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
+ WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
+ PageUptodate(page));
kfree(iop);
}
@@ -142,19 +148,11 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
struct inode *inode = page->mapping->host;
unsigned first = off >> inode->i_blkbits;
unsigned last = (off + len - 1) >> inode->i_blkbits;
- bool uptodate = true;
unsigned long flags;
- unsigned int i;
spin_lock_irqsave(&iop->uptodate_lock, flags);
- for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
- if (i >= first && i <= last)
- set_bit(i, iop->uptodate);
- else if (!test_bit(i, iop->uptodate))
- uptodate = false;
- }
-
- if (uptodate)
+ bitmap_set(iop->uptodate, first, last - first + 1);
+ if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
SetPageUptodate(page);
spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}
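
The open-coded loop over every block is replaced by one bitmap_set() of the affected range plus a bitmap_full() check. A worked example of the index math, with simplified bit-at-a-time versions of the two bitmap helpers:

	#include <stdio.h>

	#define BITS_PER_LONG (8 * sizeof(unsigned long))

	/* Simplified bitmap_set()/bitmap_full(), for illustration only. */
	static void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
	{
		for (unsigned int i = start; i < start + nbits; i++)
			map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
	}

	static int bitmap_full(const unsigned long *map, unsigned int nbits)
	{
		for (unsigned int i = 0; i < nbits; i++)
			if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
				return 0;
		return 1;
	}

	int main(void)
	{
		/* 4KiB page, 1KiB blocks (i_blkbits = 10): 4 blocks per page. */
		unsigned long uptodate[1] = { 0 };
		unsigned int off = 1024, len = 2048, blkbits = 10;
		unsigned int first = off >> blkbits;		/* 1 */
		unsigned int last = (off + len - 1) >> blkbits;	/* 2 */

		bitmap_set(uptodate, first, last - first + 1);
		printf("page uptodate? %d\n", bitmap_full(uptodate, 4)); /* 0 */
		return 0;
	}
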
@@ -172,13 +170,6 @@ iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
}
static void
-iomap_read_finish(struct iomap_page *iop, struct page *page)
-{
- if (!iop || atomic_dec_and_test(&iop->read_count))
- unlock_page(page);
-}
-
-static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
struct page *page = bvec->bv_page;
@@ -191,7 +182,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
}
- iomap_read_finish(iop, page);
+ if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
+ unlock_page(page);
}
static void
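
Read completions now retire bytes instead of bio segments: whichever completion subtracts the final outstanding byte unlocks the page, regardless of how the reads were split or merged into bios. A userspace model of the atomic_sub_and_test() semantics using C11 atomics:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Models the kernel's atomic_sub_and_test(): true when the counter
	 * reaches exactly zero as a result of this subtraction. */
	static bool sub_and_test(atomic_int *v, int i)
	{
		return atomic_fetch_sub(v, i) - i == 0;
	}

	int main(void)
	{
		atomic_int read_bytes_pending = 4096;

		/* Two bios cover one page; completions may arrive in any order. */
		printf("%d\n", sub_and_test(&read_bytes_pending, 1024)); /* 0 */
		printf("%d\n", sub_and_test(&read_bytes_pending, 3072)); /* 1: unlock */
		return 0;
	}
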
@@ -271,30 +263,19 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
ctx->cur_page_in_bio = true;
+ if (iop)
+ atomic_add(plen, &iop->read_bytes_pending);
- /*
- * Try to merge into a previous segment if we can.
- */
+ /* Try to merge into a previous segment if we can */
sector = iomap_sector(iomap, pos);
- if (ctx->bio && bio_end_sector(ctx->bio) == sector)
+ if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
+ if (__bio_try_merge_page(ctx->bio, page, plen, poff,
+ &same_page))
+ goto done;
is_contig = true;
-
- if (is_contig &&
- __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
- if (!same_page && iop)
- atomic_inc(&iop->read_count);
- goto done;
}
- /*
- * If we start a new segment we need to increase the read count, and we
- * need to do so before submitting any previous full bio to make sure
- * that we don't prematurely unlock the page.
- */
- if (iop)
- atomic_inc(&iop->read_count);
-
- if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
+ if (!is_contig || bio_full(ctx->bio, plen)) {
gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
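
The byte count is added for the whole segment before any bio is merged or submitted, which preserves the guarantee the deleted comment spelled out: the counter can never drain to zero (and unlock the page) while this segment's I/O is still being set up. As a bonus, __bio_try_merge_page()'s same_page result no longer matters for accounting, because merged bytes were already counted. Schematically (a sketch of the ordering, not literal code):

	if (iop)
		atomic_add(plen, &iop->read_bytes_pending);	/* 1: account bytes */
	/* 2: only now submit ctx->bio or merge this segment into it */
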
@@ -571,13 +552,13 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
{
struct iomap_page *iop = iomap_page_create(inode, page);
loff_t block_size = i_blocksize(inode);
- loff_t block_start = pos & ~(block_size - 1);
- loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
+ loff_t block_start = round_down(pos, block_size);
+ loff_t block_end = round_up(pos + len, block_size);
unsigned from = offset_in_page(pos), to = from + len, poff, plen;
- int status;
if (PageUptodate(page))
return 0;
+ ClearPageError(page);
do {
iomap_adjust_read_range(inode, iop, &block_start,
@@ -594,14 +575,13 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
return -EIO;
zero_user_segments(page, poff, from, to, poff + plen);
- iomap_set_range_uptodate(page, poff, plen);
- continue;
+ } else {
+ int status = iomap_read_page_sync(block_start, page,
+ poff, plen, srcmap);
+ if (status)
+ return status;
}
-
- status = iomap_read_page_sync(block_start, page, poff, plen,
- srcmap);
- if (status)
- return status;
+ iomap_set_range_uptodate(page, poff, plen);
} while ((block_start += plen) < block_end);
return 0;
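
round_down()/round_up() replace the open-coded mask arithmetic with no change in behavior for power-of-two block sizes. A self-contained check of the block-aligned bounds, with local macro stand-ins:

	#include <stdio.h>

	/* Power-of-two rounding, matching the kernel macros' results. */
	#define round_down(x, y) ((x) & ~((y) - 1))
	#define round_up(x, y)	 round_down((x) + (y) - 1, (y))

	int main(void)
	{
		long long pos = 1536, len = 1024, block_size = 1024;

		/* A write of [1536, 2560) touches blocks [1024, 3072). */
		printf("block_start=%lld block_end=%lld\n",
		       round_down(pos, block_size),
		       round_up(pos + len, block_size));
		return 0;
	}
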
@@ -685,9 +665,8 @@ iomap_set_page_dirty(struct page *page)
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
-static int
-__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
- unsigned copied, struct page *page)
+static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+ size_t copied, struct page *page)
{
flush_dcache_page(page);
@@ -709,15 +688,15 @@ __iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
return copied;
}
-static int
-iomap_write_end_inline(struct inode *inode, struct page *page,
- struct iomap *iomap, loff_t pos, unsigned copied)
+static size_t iomap_write_end_inline(struct inode *inode, struct page *page,
+ struct iomap *iomap, loff_t pos, size_t copied)
{
void *addr;
WARN_ON_ONCE(!PageUptodate(page));
BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));
+ flush_dcache_page(page);
addr = kmap_atomic(page);
memcpy(iomap->inline_data + pos, addr + pos, copied);
kunmap_atomic(addr);
@@ -726,13 +705,14 @@ iomap_write_end_inline(struct inode *inode, struct page *page,
return copied;
}
-static int
-iomap_write_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied,
- struct page *page, struct iomap *iomap, struct iomap *srcmap)
+/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
+static size_t iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+ size_t copied, struct page *page, struct iomap *iomap,
+ struct iomap *srcmap)
{
const struct iomap_page_ops *page_ops = iomap->page_ops;
loff_t old_size = inode->i_size;
- int ret;
+ size_t ret;
if (srcmap->type == IOMAP_INLINE) {
ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
@@ -811,13 +791,8 @@ again:
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
- flush_dcache_page(page);
-
- status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
+ copied = iomap_write_end(inode, pos, bytes, copied, page, iomap,
srcmap);
- if (unlikely(status < 0))
- break;
- copied = status;
cond_resched();
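
Because iomap_write_end() now returns a byte count that can never be an errno, the actor folds the result straight back into copied and lets its normal short-copy handling deal with a zero. A hedged sketch of the loop's shape after the change (the real iomap_write_actor() carries more bookkeeping around write_begin, faults, and balancing):

	do {
		size_t copied;

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		copied = iomap_write_end(inode, pos, bytes, copied, page,
				iomap, srcmap);
		/* copied == 0 (e.g. a short fault) means retry, not error */
		pos += copied;
		written += copied;
	} while (iov_iter_count(i));
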
@@ -891,11 +866,8 @@ iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
srcmap);
- if (unlikely(status <= 0)) {
- if (WARN_ON_ONCE(status == 0))
- return -EIO;
- return status;
- }
+ if (WARN_ON_ONCE(status == 0))
+ return -EIO;
cond_resched();
@@ -928,11 +900,13 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
-static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
- unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
+static s64 iomap_zero(struct inode *inode, loff_t pos, u64 length,
+ struct iomap *iomap, struct iomap *srcmap)
{
struct page *page;
int status;
+ unsigned offset = offset_in_page(pos);
+ unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
if (status)
@@ -944,38 +918,33 @@ static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
}
-static loff_t
-iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
- void *data, struct iomap *iomap, struct iomap *srcmap)
+static loff_t iomap_zero_range_actor(struct inode *inode, loff_t pos,
+ loff_t length, void *data, struct iomap *iomap,
+ struct iomap *srcmap)
{
bool *did_zero = data;
loff_t written = 0;
- int status;
/* already zeroed? we're done. */
if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
- return count;
+ return length;
do {
- unsigned offset, bytes;
-
- offset = offset_in_page(pos);
- bytes = min_t(loff_t, PAGE_SIZE - offset, count);
+ s64 bytes;
if (IS_DAX(inode))
- status = dax_iomap_zero(pos, offset, bytes, iomap);
+ bytes = dax_iomap_zero(pos, length, iomap);
else
- status = iomap_zero(inode, pos, offset, bytes, iomap,
- srcmap);
- if (status < 0)
- return status;
+ bytes = iomap_zero(inode, pos, length, iomap, srcmap);
+ if (bytes < 0)
+ return bytes;
pos += bytes;
- count -= bytes;
+ length -= bytes;
written += bytes;
if (did_zero)
*did_zero = true;
- } while (count > 0);
+ } while (length > 0);
return written;
}
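
The per-page chunking that used to live in the actor now happens inside iomap_zero() itself, and the actor simply advances by whatever the callee (or dax_iomap_zero()) reports, with the s64 return carrying errors. A self-contained model of the chunk arithmetic, using local stand-ins for the kernel macros:

	#include <stdio.h>

	#define PAGE_SIZE	  4096ULL
	#define offset_in_page(p) ((p) & (PAGE_SIZE - 1))
	#define min_u64(a, b)	  ((a) < (b) ? (a) : (b))

	int main(void)
	{
		/* Zero 10000 bytes starting at file offset 4000. */
		unsigned long long pos = 4000, length = 10000;

		while (length > 0) {
			unsigned long long offset = offset_in_page(pos);
			unsigned long long bytes = min_u64(PAGE_SIZE - offset, length);

			/* chunks: 96 @ 4000, 4096 @ 4096, 4096 @ 8192, 1712 @ 12288 */
			printf("zero %llu bytes at %llu\n", bytes, pos);
			pos += bytes;
			length -= bytes;
		}
		return 0;
	}
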
@@ -1070,7 +1039,7 @@ EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
- int error)
+ int error, unsigned int len)
{
struct iomap_page *iop = to_iomap_page(page);
@@ -1079,10 +1048,10 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
mapping_set_error(inode->i_mapping, -EIO);
}
- WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
- WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);
+ WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
+ WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
- if (!iop || atomic_dec_and_test(&iop->write_count))
+ if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
end_page_writeback(page);
}
@@ -1116,7 +1085,8 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
/* walk each page on bio, ending page IO on them */
bio_for_each_segment_all(bv, bio, iter_all)
- iomap_finish_page_writeback(inode, bv->bv_page, error);
+ iomap_finish_page_writeback(inode, bv->bv_page, error,
+ bv->bv_len);
bio_put(bio);
}
/* The ioend has been freed by bio_put() */
@@ -1332,8 +1302,8 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
&same_page);
- if (iop && !same_page)
- atomic_inc(&iop->write_count);
+ if (iop)
+ atomic_add(len, &iop->write_bytes_pending);
if (!merged) {
if (bio_full(wpc->ioend->io_bio, len)) {
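
Writeback accounting mirrors the read side: every block added to an ioend contributes len to write_bytes_pending whether or not it merged into an existing bio, and iomap_finish_page_writeback() subtracts bv_len per completed segment, ending page writeback only when the count drains to zero. Schematically (a sketch of the pairing, not the literal call sites):

	/* submission: account bytes up front */
	if (iop)
		atomic_add(len, &iop->write_bytes_pending);

	/* completion: the final byte ends page writeback */
	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		end_page_writeback(page);
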
@@ -1375,8 +1345,8 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
- WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
- WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);
+ WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
+ WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
/*
* Walk through the page to find areas to write back. If we run off the