author	Coly Li <colyli@suse.de>	2020-07-25 20:00:31 +0800
committer	Jens Axboe <axboe@kernel.dk>	2020-07-25 07:38:20 -0600
commit	21e478ddb29372158746185cc75f5c8e2f4a679a (patch)
tree	58421e41f8fd876dfc3a8373806f223ed4e3d247 /drivers/md
parent	bcache: introduce meta_bucket_pages() related helper routines (diff)
bcache: handle c->uuids properly for bucket size > 8MB
Bcache allocates a whole bucket to store c->uuids on the cache device, and
allocates contiguous pages to hold it in memory. When the bucket size exceeds
the maximum allocatable contiguous pages, bch_cache_set_alloc() fails and
cache device registration fails.

This patch allocates c->uuids with alloc_meta_bucket_pages(), and uses
ilog2(meta_bucket_pages(c)) as the page order when freeing it. When writing
c->uuids to the cache device, its size is determined by
meta_bucket_pages(c) * PAGE_SECTORS. Now c->uuids is handled properly for
bucket sizes > 8MB.

Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
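For context, meta_bucket_pages(), meta_bucket_bytes() and
alloc_meta_bucket_pages() come from the parent commit ("bcache: introduce
meta_bucket_pages() related helper routines"). A rough sketch of their shape
(an approximation for illustration, not the exact kernel definitions):

    /* Sketch: cap a metadata bucket's in-memory footprint at the largest
     * contiguous allocation the buddy allocator can satisfy. */
    static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
    {
            unsigned int pages = sb->bucket_size / PAGE_SECTORS;

            return min_t(unsigned int, pages, MAX_ORDER_NR_PAGES);
    }

    static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
    {
            return meta_bucket_pages(sb) << PAGE_SHIFT;
    }

    /* Allocate the (power-of-two) page run backing a metadata bucket. */
    #define alloc_meta_bucket_pages(gfp, sb) \
            ((void *) __get_free_pages(gfp, ilog2(meta_bucket_pages(sb))))

With such a cap in place, a bucket larger than the allocator's limit no
longer makes the allocation fail during registration; the metadata simply
uses the largest page run the allocator can provide.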
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/bcache/super.c	10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f2bab575b3cf..2ddc41c201ea 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -466,6 +466,7 @@ static int __uuid_write(struct cache_set *c)
 	BKEY_PADDED(key) k;
 	struct closure cl;
 	struct cache *ca;
+	unsigned int size;
 
 	closure_init_stack(&cl);
 	lockdep_assert_held(&bch_register_lock);
@@ -473,7 +474,8 @@ static int __uuid_write(struct cache_set *c)
 	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
 		return 1;
 
-	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
+	size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;
+	SET_KEY_SIZE(&k.key, size);
 	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
 	closure_sync(&cl);
@@ -1664,7 +1666,7 @@ static void cache_set_free(struct closure *cl)
 	}
 
 	bch_bset_sort_state_free(&c->sort);
-	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
+	free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
 
 	if (c->moving_gc_wq)
 		destroy_workqueue(c->moving_gc_wq);
@@ -1870,7 +1872,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	c->bucket_bits = ilog2(sb->bucket_size);
 	c->block_bits = ilog2(sb->block_size);
-	c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
+	c->nr_uuids = meta_bucket_bytes(&c->sb) / sizeof(struct uuid_entry);
 	c->devices_max_used = 0;
 	atomic_set(&c->attached_dev_nr, 0);
 	c->btree_pages = bucket_pages(c);
@@ -1921,7 +1923,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
 		goto err;
 
-	c->uuids = alloc_bucket_pages(GFP_KERNEL, c);
+	c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, &c->sb);
 	if (!c->uuids)
 		goto err;
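The invariant the patch preserves is that the page order passed to
free_pages() in cache_set_free() matches the order used when c->uuids was
allocated. Assuming alloc_meta_bucket_pages() wraps __get_free_pages() with
order ilog2(meta_bucket_pages(...)), as the parent commit's helpers suggest,
the pairing looks like this (illustrative sketch, not kernel code):

    /* Allocation and free must agree on the order, or free_pages()
     * returns the wrong number of pages to the buddy allocator. */
    unsigned int order = ilog2(meta_bucket_pages(&c->sb));
    void *uuids = (void *) __get_free_pages(GFP_KERNEL, order);

    if (uuids)
            free_pages((unsigned long) uuids, order);

The on-disk key size set in __uuid_write() follows the same helper:
meta_bucket_pages(&c->sb) * PAGE_SECTORS sectors are written, so the I/O
size always matches the in-memory buffer rather than the raw bucket size.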