author     Matthew Wilcox (Oracle) <willy@infradead.org>    2020-09-21 08:58:40 -0700
committer  Darrick J. Wong <darrick.wong@oracle.com>        2020-09-21 08:59:26 -0700
commit     0a195b91e8991367a94ee199f3af7faa7607e7db
tree       25e7c055e0621e64a0e09479cd505dd1a6083168 /fs/iomap
parent     iomap: Use bitmap ops to set uptodate bits
iomap: Support arbitrarily many blocks per page
Size the uptodate array dynamically to support larger pages in the
page cache.  With a 64kB page, we're only saving 8 bytes per page
today, but with a 2MB maximum page size, we'd have to allocate more
than 4kB per page.  Add a few debugging assertions.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
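To make the sizing arithmetic concrete, here is a small userspace sketch (not part of the patch) that mirrors the struct_size() + BITS_TO_LONGS() calculation the change introduces. The struct layout, the iop_alloc_size() helper and the page/block size values are illustrative stand-ins, not kernel identifiers:

/*
 * Userspace sketch: mimics the struct_size() + BITS_TO_LONGS() sizing
 * used in the patch so the allocation arithmetic is easy to check.
 */
#include <stdio.h>
#include <stddef.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct iomap_page_sketch {
	int read_count;			/* stands in for atomic_t */
	int write_count;		/* stands in for atomic_t */
	int uptodate_lock;		/* stands in for spinlock_t */
	unsigned long uptodate[];	/* one bit per block, sized at alloc time */
};

/* Equivalent of struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)). */
static size_t iop_alloc_size(unsigned int nr_blocks)
{
	return sizeof(struct iomap_page_sketch) +
	       BITS_TO_LONGS(nr_blocks) * sizeof(unsigned long);
}

int main(void)
{
	/* { page size, filesystem block size } pairs, in bytes. */
	static const unsigned int cases[][2] = {
		{ 4096, 512 },		/* 4kB page, 512B blocks  ->   8 blocks */
		{ 65536, 4096 },	/* 64kB page, 4kB blocks  ->  16 blocks */
		{ 2097152, 4096 },	/* 2MB THP,   4kB blocks  -> 512 blocks */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		unsigned int nr_blocks = cases[i][0] / cases[i][1];

		printf("page %7u, block %4u: %3u blocks, %zu-byte bitmap, %zu-byte struct\n",
		       cases[i][0], cases[i][1], nr_blocks,
		       BITS_TO_LONGS(nr_blocks) * sizeof(unsigned long),
		       iop_alloc_size(nr_blocks));
	}
	return 0;
}

The old DECLARE_BITMAP(uptodate, PAGE_SIZE / 512) reserved one bit per 512-byte sector of a PAGE_SIZE page at compile time; the flexible array reserves only one bit per filesystem block actually covered by the (possibly compound) page, rounded up to whole longs.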
Diffstat (limited to 'fs/iomap')
-rw-r--r--   fs/iomap/buffered-io.c   22
1 file changed, 17 insertions, 5 deletions
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index eab839b2be33..9f0fa495ab69 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -22,18 +22,25 @@
 #include "../internal.h"
 
 /*
- * Structure allocated for each page when block size < PAGE_SIZE to track
- * sub-page uptodate status and I/O completions.
+ * Structure allocated for each page or THP when block size < page size
+ * to track sub-page uptodate status and I/O completions.
  */
 struct iomap_page {
 	atomic_t		read_count;
 	atomic_t		write_count;
 	spinlock_t		uptodate_lock;
-	DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+	unsigned long		uptodate[];
 };
 
 static inline struct iomap_page *to_iomap_page(struct page *page)
 {
+	/*
+	 * per-block data is stored in the head page.  Callers should
+	 * not be dealing with tail pages (and if they are, they can
+	 * call thp_head() first.
+	 */
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+
 	if (page_has_private(page))
 		return (struct iomap_page *)page_private(page);
 	return NULL;
@@ -45,11 +52,13 @@ static struct iomap_page *
 iomap_page_create(struct inode *inode, struct page *page)
 {
 	struct iomap_page *iop = to_iomap_page(page);
+	unsigned int nr_blocks = i_blocks_per_page(inode, page);
 
-	if (iop || i_blocks_per_page(inode, page) <= 1)
+	if (iop || nr_blocks <= 1)
 		return iop;
 
-	iop = kzalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
+	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
+			GFP_NOFS | __GFP_NOFAIL);
 	spin_lock_init(&iop->uptodate_lock);
 	attach_page_private(page, iop);
 	return iop;
@@ -59,11 +68,14 @@ static void
 iomap_page_release(struct page *page)
 {
 	struct iomap_page *iop = detach_page_private(page);
+	unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
 
 	if (!iop)
 		return;
 	WARN_ON_ONCE(atomic_read(&iop->read_count));
 	WARN_ON_ONCE(atomic_read(&iop->write_count));
+	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
+			PageUptodate(page));
 	kfree(iop);
 }
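The new WARN_ON_ONCE() in iomap_page_release() checks that the per-block bitmap is full exactly when the page itself is marked uptodate. Below is a minimal userspace sketch of that invariant, assuming the obvious set-bits-then-set-flag sequence; bitmap_full_sketch() stands in for the kernel's bitmap_full() and a plain bool stands in for the page flag (names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Stand-in for bitmap_full(): true iff bits 0..nbits-1 are all set. */
static bool bitmap_full_sketch(const unsigned long *map, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return false;
	return true;
}

int main(void)
{
	enum { NR_BLOCKS = 16 };	/* e.g. a 64kB page with 4kB blocks */
	unsigned long uptodate[BITS_TO_LONGS(NR_BLOCKS)] = { 0 };
	bool page_uptodate = false;	/* stand-in for PageUptodate() */

	/* Mark blocks uptodate one at a time, as block reads complete. */
	for (unsigned int i = 0; i < NR_BLOCKS; i++) {
		uptodate[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
		if (bitmap_full_sketch(uptodate, NR_BLOCKS))
			page_uptodate = true;	/* SetPageUptodate() analogue */
	}

	/* Release-time check: bitmap fullness must match the page flag. */
	printf("invariant holds: %s\n",
	       bitmap_full_sketch(uptodate, NR_BLOCKS) == page_uptodate ?
	       "yes" : "no");
	return 0;
}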