author		Coly Li <colyli@suse.de>	2019-06-28 19:59:28 +0800
committer	Jens Axboe <axboe@kernel.dk>	2019-06-28 07:39:14 -0600
commit		e6dcbd3e6c91b7828cb305ec324eb7fd9bdea8a0 (patch)
tree		3ead211d04b197c99416945b1f81ed089ea679e3
parent		Revert "bcache: set CACHE_SET_IO_DISABLE in bch_cached_dev_error()" (diff)
download	linux-dev-e6dcbd3e6c91b7828cb305ec324eb7fd9bdea8a0.tar.xz
		linux-dev-e6dcbd3e6c91b7828cb305ec324eb7fd9bdea8a0.zip
bcache: avoid flushing btree node in cache_set_flush() if io disabled
When cache_set_flush() is called because too many I/O errors were detected on the cache device and the cache set is retiring, there is no point in flushing the cached btree nodes on c->btree_cache: CACHE_SET_IO_DISABLE is already set in c->flags and all I/O to the cache device will be rejected.

This patch makes cache_set_flush() check whether CACHE_SET_IO_DISABLE is set. If it is, the cached btree nodes are not flushed, which saves time and lets the cache set retire faster.

Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
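For context, a minimal user-space sketch of the pattern the patch applies: test a bit in a flags word and skip the now-pointless flush when I/O has been disabled. The names fake_set, FAKE_SET_IO_DISABLE, bit_is_set() and flush_nodes() are illustrative stand-ins, not bcache code; the real change is in the diff below.

/*
 * Standalone sketch of "skip the flush when the I/O-disable flag is set".
 * All identifiers here are made up for illustration only.
 */
#include <stdio.h>
#include <stdbool.h>

#define FAKE_SET_IO_DISABLE 0	/* bit index in ->flags, analogous to CACHE_SET_IO_DISABLE */

struct fake_set {
	unsigned long flags;
	int dirty_nodes;
};

/* Mimics test_bit(): true if bit nr is set in *addr. */
static bool bit_is_set(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

static void flush_nodes(struct fake_set *c)
{
	/* Flushing is pointless once I/O to the device is rejected. */
	if (bit_is_set(FAKE_SET_IO_DISABLE, &c->flags)) {
		printf("I/O disabled, skipping flush of %d node(s)\n",
		       c->dirty_nodes);
		return;
	}
	printf("flushing %d dirty node(s)\n", c->dirty_nodes);
	c->dirty_nodes = 0;
}

int main(void)
{
	struct fake_set c = { .flags = 0, .dirty_nodes = 3 };

	flush_nodes(&c);			/* normal shutdown: flush      */
	c.flags |= 1UL << FAKE_SET_IO_DISABLE;	/* simulate error-driven retire */
	c.dirty_nodes = 3;
	flush_nodes(&c);			/* retiring: skip the flush    */
	return 0;
}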
-rw-r--r--	drivers/md/bcache/super.c	18
1 file changed, 11 insertions, 7 deletions
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index ba2ad093bc80..dc6702c2c4b6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1553,13 +1553,17 @@ static void cache_set_flush(struct closure *cl)
	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

-	/* Should skip this if we're unregistering because of an error */
-	list_for_each_entry(b, &c->btree_cache, list) {
-		mutex_lock(&b->write_lock);
-		if (btree_node_dirty(b))
-			__bch_btree_node_write(b, NULL);
-		mutex_unlock(&b->write_lock);
-	}
+	/*
+	 * Avoid flushing cached nodes if cache set is retiring
+	 * due to too many I/O errors detected.
+	 */
+	if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
+		list_for_each_entry(b, &c->btree_cache, list) {
+			mutex_lock(&b->write_lock);
+			if (btree_node_dirty(b))
+				__bch_btree_node_write(b, NULL);
+			mutex_unlock(&b->write_lock);
+		}

	for_each_cache(ca, c, i)
		if (ca->alloc_thread)