author		tang.junhui <tang.junhui@zte.com.cn>	2017-10-30 14:46:34 -0700
committer	Jens Axboe <axboe@kernel.dk>	2017-10-30 15:57:54 -0600
commit		c157313791a999646901b3e3c6888514ebc36d62 (patch)
tree		8b5fef73f04ef261fcc6761950795ebd3db9cc5c /drivers/md/bcache/request.c
parent		bcache: update bucket_in_use in real time (diff)
bcache: fix wrong cache_misses statistics
Currently, cache-missed IOs are identified by s->cache_miss, but in fact
there are many situations where a missed IO is never assigned a value for
s->cache_miss in cached_dev_cache_miss(), for example a bypassed IO
(s->iop.bypass = 1) or a failed cache_bio allocation. In these situations
the code goes to out_put or out_submit with s->cache_miss still NULL,
which leads bch_mark_cache_accounting() to treat the IO as a hit.

[ML: applied by 3-way merge]

Signed-off-by: tang.junhui <tang.junhui@zte.com.cn>
Reviewed-by: Michael Lyle <mlyle@lyle.org>
Reviewed-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
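To make the failure mode concrete, below is a small standalone sketch (plain
userspace C, not kernel code; struct fake_search, read_one() and the other
names are hypothetical and exist only for this illustration). It models how
keying the statistic off a pointer that is only set on the allocation path
reports bypassed or failed-allocation reads as hits, while a dedicated flag
set on entry to the miss path counts them correctly, which is what the patch
does with s->cache_missed.

/*
 * Standalone sketch (not kernel code): why a pointer that is only set on
 * the allocation path undercounts misses, and why a flag set at the start
 * of the miss path does not.  All names here are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_search {
	void *cache_miss;    /* set only when a cache_bio is allocated */
	bool  cache_missed;  /* set whenever the miss path is entered  */
	bool  bypass;
};

/* Simplified stand-in for a cache-miss handler with early-return paths. */
static void read_one(struct fake_search *s, bool alloc_fails)
{
	s->cache_missed = true;     /* record the miss up front (the fix) */

	if (s->bypass)
		return;             /* bypassed IO: cache_miss stays NULL */

	if (alloc_fails)
		return;             /* allocation failure: same problem   */

	s->cache_miss = malloc(16); /* only this path sets the pointer    */
}

int main(void)
{
	struct fake_search s = { .bypass = true };

	read_one(&s, false);

	/* Pointer-based accounting wrongly reports a hit for this read. */
	printf("pointer-based: %s\n", s.cache_miss ? "miss" : "hit (wrong)");
	/* Flag-based accounting reports the miss correctly. */
	printf("flag-based:    %s\n", s.cache_missed ? "miss" : "hit");

	free(s.cache_miss);
	return 0;
}

With s.bypass = true, the pointer-based check prints "hit (wrong)" while the
flag-based check prints "miss", mirroring the change to the argument passed
to bch_mark_cache_accounting() below.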
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--	drivers/md/bcache/request.c	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 886e4b6643f1..597dd1e87bea 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -470,6 +470,7 @@ struct search {
 	unsigned		recoverable:1;
 	unsigned		write:1;
 	unsigned		read_dirty_data:1;
+	unsigned		cache_missed:1;
 
 	unsigned long		start_time;
 
@@ -656,6 +657,7 @@ static inline struct search *search_alloc(struct bio *bio,
 
 	s->orig_bio		= bio;
 	s->cache_miss		= NULL;
+	s->cache_missed		= 0;
 	s->d			= d;
 	s->recoverable		= 1;
 	s->write		= op_is_write(bio_op(bio));
@@ -775,7 +777,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	bch_mark_cache_accounting(s->iop.c, s->d,
-				  !s->cache_miss, s->iop.bypass);
+				  !s->cache_missed, s->iop.bypass);
 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
 	if (s->iop.status)
@@ -794,6 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
 
+	s->cache_missed = 1;
+
 	if (s->cache_miss || s->iop.bypass) {
 		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;