Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--  drivers/md/bcache/request.c  195
1 file changed, 72 insertions(+), 123 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 820d8402a1dc..3427555b0cca 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -44,10 +44,10 @@ static void bio_csum(struct bio *bio, struct bkey *k)
uint64_t csum = 0;
bio_for_each_segment(bv, bio, iter) {
- void *d = kmap(bv.bv_page) + bv.bv_offset;
+ void *d = bvec_kmap_local(&bv);
- csum = bch_crc64_update(csum, d, bv.bv_len);
- kunmap(bv.bv_page);
+ csum = crc64_be(csum, d, bv.bv_len);
+ kunmap_local(d);
}
k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
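
The replacement pairs bvec_kmap_local() with kunmap_local() on the returned address (rather than on the page), and swaps the bcache-private bch_crc64_update() for the library crc64_be(). A minimal sketch of the resulting loop, assuming a kernel that provides these helpers:

    static uint64_t bio_csum_sketch(struct bio *bio)
    {
        struct bio_vec bv;
        struct bvec_iter iter;
        uint64_t csum = 0;

        bio_for_each_segment(bv, bio, iter) {
            /* maps bv.bv_page at bv.bv_offset in one call */
            void *d = bvec_kmap_local(&bv);

            csum = crc64_be(csum, d, bv.bv_len);
            kunmap_local(d);    /* unmap by address, not by page */
        }
        /* the top bit of the key field is reserved, as above */
        return csum & (~0ULL >> 1);
    }
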
@@ -99,7 +99,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
* bch_data_insert_keys() will insert the keys created so far
* and finish the rest when the keylist is empty.
*/
- if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
+ if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
return -ENOMEM;
return __bch_keylist_realloc(l, u64s);
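
With multi-cache support removed, a cache_set owns exactly one struct cache, so block_bytes() now takes the cache rather than the set. A hedged sketch of the accessor, assuming sb.block_size is expressed in 512-byte sectors:

    static inline unsigned int block_bytes(struct cache *ca)
    {
        return ca->sb.block_size << 9;    /* sectors -> bytes */
    }
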
@@ -110,7 +110,7 @@ static void bch_data_invalidate(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio;
- pr_debug("invalidating %i sectors from %llu",
+ pr_debug("invalidating %i sectors from %llu\n",
bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
@@ -394,14 +394,14 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
goto skip;
}
- if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
- bio_sectors(bio) & (c->sb.block_size - 1)) {
- pr_debug("skipping unaligned io");
+ if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
+ bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
+ pr_debug("skipping unaligned io\n");
goto skip;
}
if (bypass_torture_test(dc)) {
- if ((get_random_int() & 3) == 3)
+ if (prandom_u32_max(4) == 3)
goto skip;
else
goto rescale;
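
prandom_u32_max(4) returns a uniform value in [0, 3], so the one-in-four bypass probability is unchanged but the bound is stated explicitly instead of being implied by the & 3 mask. Illustrative sketch only:

    /* ~25% of bios skip the cache while torture testing */
    static bool torture_skip(void)
    {
        return prandom_u32_max(4) == 3;
    }
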
@@ -475,6 +475,7 @@ struct search {
unsigned int read_dirty_data:1;
unsigned int cache_missed:1;
+ struct block_device *orig_bdev;
unsigned long start_time;
struct btree_op op;
@@ -650,8 +651,8 @@ static void backing_request_endio(struct bio *bio)
*/
if (unlikely(s->iop.writeback &&
bio->bi_opf & REQ_PREFLUSH)) {
- pr_err("Can't flush %s: returned bi_status %i",
- dc->backing_dev_name, bio->bi_status);
+ pr_err("Can't flush %pg: returned bi_status %i\n",
+ dc->bdev, bio->bi_status);
} else {
/* set to orig_bio->bi_status in bio_complete() */
s->iop.status = bio->bi_status;
@@ -668,9 +669,9 @@ static void backing_request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
- &s->d->disk->part0, s->start_time);
-
+ /* Count on bcache device */
+ bio_end_io_acct_remapped(s->orig_bio, s->start_time,
+ s->orig_bdev);
trace_bcache_request_end(s->d, s->orig_bio);
s->orig_bio->bi_status = s->iop.status;
bio_endio(s->orig_bio);
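
The generic_start_io_acct()/generic_end_io_acct() pair is replaced by bio_start_io_acct() at submission and bio_end_io_acct_remapped() at completion. The remapped variant exists because by completion the bio points at the backing device; the saved orig_bdev lets the I/O be charged to the bcache device the user submitted against. Hedged sketch of the pairing (backing_bdev is a stand-in name):

    unsigned long start = bio_start_io_acct(bio);    /* before remapping */
    struct block_device *orig = bio->bi_bdev;

    bio_set_dev(bio, backing_bdev);    /* bio now targets the backing store */
    /* ... on completion ... */
    bio_end_io_acct_remapped(bio, start, orig);    /* account on the bcache device */
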
@@ -684,8 +685,7 @@ static void do_bio_hook(struct search *s,
{
struct bio *bio = &s->bio.bio;
- bio_init(bio, NULL, 0);
- __bio_clone_fast(bio, orig_bio);
+ bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
/*
* bi_end_io can be set separately somewhere else, e.g. the
* variants in,
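
bio_init_clone() initializes an embedded (not separately allocated) bio as a clone of another in a single call, folding together the old bio_init() + __bio_clone_fast() pair and taking the target bdev explicitly. Sketch of the pattern:

    /* s->bio.bio is embedded in struct search, hence init-clone, not alloc-clone */
    bio_init_clone(orig_bio->bi_bdev, &s->bio.bio, orig_bio, GFP_NOIO);
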
@@ -713,7 +713,8 @@ static void search_free(struct closure *cl)
}
static inline struct search *search_alloc(struct bio *bio,
- struct bcache_device *d)
+ struct bcache_device *d, struct block_device *orig_bdev,
+ unsigned long start_time)
{
struct search *s;
@@ -730,8 +731,9 @@ static inline struct search *search_alloc(struct bio *bio,
s->recoverable = 1;
s->write = op_is_write(bio_op(bio));
s->read_dirty_data = 0;
- s->start_time = jiffies;
-
+ /* Count on the bcache device */
+ s->orig_bdev = orig_bdev;
+ s->start_time = start_time;
s->iop.c = d->c;
s->iop.bio = NULL;
s->iop.inode = d->id;
@@ -828,11 +830,11 @@ static void cached_dev_read_done(struct closure *cl)
*/
if (s->iop.bio) {
- bio_reset(s->iop.bio);
+ bio_reset(s->iop.bio, s->cache_miss->bi_bdev, REQ_OP_READ);
s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector;
- bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
+ bio_clone_blkg_association(s->iop.bio, s->cache_miss);
bch_bio_map(s->iop.bio, NULL);
bio_copy_data(s->cache_miss, s->iop.bio);
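
bio_reset() now reinitializes the device and operation along with the rest of the bio, so the separate bio_copy_dev() call is gone; only the cgroup (blkg) association still needs an explicit copy, since reset deliberately clears it. Hedged sketch of reusing a bio for a read under the new signature:

    bio_reset(bio, bdev, REQ_OP_READ);    /* bio_reset(bio, bdev, opf) */
    bio->bi_iter.bi_sector = sector;
    bio->bi_iter.bi_size   = sectors << 9;
    bio_clone_blkg_association(bio, src);    /* ownership not covered by reset */
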
@@ -877,9 +879,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned int sectors)
{
int ret = MAP_CONTINUE;
- unsigned int reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
+ unsigned int size_limit;
s->cache_missed = 1;
@@ -889,13 +891,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
}
- if (!(bio->bi_opf & REQ_RAHEAD) &&
- !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
- s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
- reada = min_t(sector_t, dc->readahead >> 9,
- get_capacity(bio->bi_disk) - bio_end_sector(bio));
-
- s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+ /* Limitation for valid replace key size and cache_bio bvecs number */
+ size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+ (1 << KEY_SIZE_BITS) - 1);
+ s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
s->iop.replace_key = KEY(s->iop.inode,
bio->bi_iter.bi_sector + s->insert_bio_sectors,
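
Worked numbers for the new bound, assuming 4 KiB pages and the usual constants (BIO_MAX_VECS = 256, PAGE_SECTORS = 8, KEY_SIZE_BITS = 16):

    /*
     * BIO_MAX_VECS * PAGE_SECTORS = 256 * 8 = 2048 sectors (1 MiB)
     * (1 << KEY_SIZE_BITS) - 1    = 65535 sectors
     *
     * size_limit = min(2048, 65535) = 2048, so insert_bio_sectors can
     * always be represented both by one cache_bio's bvecs and by one
     * replace key's size field.
     */
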
@@ -907,19 +906,19 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
s->iop.replace = true;
- miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+ miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+ &s->d->bio_split);
/* btree_search_recurse()'s btree iterator is no good anymore */
ret = miss == bio ? MAP_DONE : -EINTR;
- cache_bio = bio_alloc_bioset(GFP_NOWAIT,
+ cache_bio = bio_alloc_bioset(miss->bi_bdev,
DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
- &dc->disk.bio_split);
+ 0, GFP_NOWAIT, &dc->disk.bio_split);
if (!cache_bio)
goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
- bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = backing_request_endio;
@@ -929,9 +928,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
- if (reada)
- bch_mark_cache_readahead(s->iop.c, s->d);
-
s->cache_miss = miss;
s->iop.bio = cache_bio;
bio_get(cache_bio);
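
bio_alloc_bioset() likewise gained bdev and opf parameters; the argument order is now (bdev, nr_vecs, opf, gfp, bioset). Passing 0 for opf yields a read, since REQ_OP_READ is defined as 0. Hedged sketch:

    /* GFP_NOWAIT may fail; callers must handle a NULL return */
    cache_bio = bio_alloc_bioset(bdev, nr_vecs, 0 /* REQ_OP_READ */,
                                 GFP_NOWAIT, bs);
    if (!cache_bio)
        goto out_submit;    /* fall back to the plain miss path */
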
@@ -1009,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
bio_get(s->iop.bio);
if (bio_op(bio) == REQ_OP_DISCARD &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
+ !bdev_max_discard_sectors(dc->bdev))
goto insert_data;
/* I/O request sent to backing device */
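
blk_queue_discard() is gone; whether a device supports discard is now inferred from its queue limits, with a non-zero max_discard_sectors meaning supported. A hedged equivalent predicate (bdev_supports_discard is a hypothetical name for illustration):

    static inline bool bdev_supports_discard(struct block_device *bdev)
    {
        return bdev_max_discard_sectors(bdev) != 0;
    }
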
@@ -1027,21 +1023,21 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
*/
struct bio *flush;
- flush = bio_alloc_bioset(GFP_NOIO, 0,
- &dc->disk.bio_split);
+ flush = bio_alloc_bioset(bio->bi_bdev, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_NOIO, &dc->disk.bio_split);
if (!flush) {
s->iop.status = BLK_STS_RESOURCE;
goto insert_data;
}
- bio_copy_dev(flush, bio);
flush->bi_end_io = backing_request_endio;
flush->bi_private = cl;
- flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
/* I/O request sent to backing device */
closure_bio_submit(s->iop.c, flush, cl);
}
} else {
- s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
+ s->iop.bio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
+ &dc->disk.bio_split);
/* I/O request sent to backing device */
bio->bi_end_io = backing_request_endio;
closure_bio_submit(s->iop.c, bio, cl);
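
Similarly, bio_clone_fast() becomes bio_alloc_clone(), which allocates from the bioset and clones in one call, again taking the bdev up front. Sketch:

    /* clone bio onto the same device, backed by dc->disk.bio_split */
    struct bio *clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
                                        &dc->disk.bio_split);
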
@@ -1072,6 +1068,7 @@ struct detached_dev_io_private {
unsigned long start_time;
bio_end_io_t *bi_end_io;
void *bi_private;
+ struct block_device *orig_bdev;
};
static void detached_dev_end_io(struct bio *bio)
@@ -1082,8 +1079,8 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io = ddip->bi_end_io;
bio->bi_private = ddip->bi_private;
- generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
- &ddip->d->disk->part0, ddip->start_time);
+ /* Count on the bcache device */
+ bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev);
if (bio->bi_status) {
struct cached_dev *dc = container_of(ddip->d,
@@ -1096,7 +1093,8 @@ static void detached_dev_end_io(struct bio *bio)
bio->bi_end_io(bio);
}
-static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
+static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
+ struct block_device *orig_bdev, unsigned long start_time)
{
struct detached_dev_io_private *ddip;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
@@ -1107,18 +1105,26 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
* which would call closure_get(&dc->disk.cl)
*/
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
+ if (!ddip) {
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio->bi_end_io(bio);
+ return;
+ }
+
ddip->d = d;
- ddip->start_time = jiffies;
+ /* Count on the bcache device */
+ ddip->orig_bdev = orig_bdev;
+ ddip->start_time = start_time;
ddip->bi_end_io = bio->bi_end_io;
ddip->bi_private = bio->bi_private;
bio->bi_end_io = detached_dev_end_io;
bio->bi_private = ddip;
if ((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(dc->bdev)))
+ !bdev_max_discard_sectors(dc->bdev))
bio->bi_end_io(bio);
else
- generic_make_request(bio);
+ submit_bio_noacct(bio);
}
static void quit_max_writeback_rate(struct cache_set *c,
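
Two things change in the hunk above: the allocation failure is now handled instead of being dereferenced, and generic_make_request() has been renamed submit_bio_noacct(). The "noacct" spelling signals that the submitter has already accounted the I/O, which is exactly the case here since bio_start_io_acct() ran against the bcache device. Hedged sketch of the failure path:

    ddip = kzalloc(sizeof(*ddip), GFP_NOIO);
    if (!ddip) {
        bio->bi_status = BLK_STS_RESOURCE;
        bio->bi_end_io(bio);    /* complete directly; no ddip state to unwind */
        return;
    }
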
@@ -1161,19 +1167,20 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
-static blk_qc_t cached_dev_make_request(struct request_queue *q,
- struct bio *bio)
+void cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
- struct bcache_device *d = bio->bi_disk->private_data;
+ struct block_device *orig_bdev = bio->bi_bdev;
+ struct bcache_device *d = orig_bdev->bd_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ unsigned long start_time;
int rw = bio_data_dir(bio);
if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
dc->io_disable)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
if (likely(d->c)) {
@@ -1191,22 +1198,19 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
}
}
- generic_start_io_acct(q,
- bio_op(bio),
- bio_sectors(bio),
- &d->disk->part0);
+ start_time = bio_start_io_acct(bio);
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
- s = search_alloc(bio, d);
+ s = search_alloc(bio, d, orig_bdev, start_time);
trace_bcache_request_start(s->d, bio);
if (!bio->bi_iter.bi_size) {
/*
* can't call bch_journal_meta from under
- * generic_make_request
+ * submit_bio_noacct
*/
continue_at_nobarrier(&s->cl,
cached_dev_nodata,
@@ -1221,9 +1225,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
}
} else
/* I/O request sent to backing device */
- detached_dev_do_request(d, bio);
-
- return BLK_QC_T_NONE;
+ detached_dev_do_request(d, bio, orig_bdev, start_time);
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
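
With the make_request_fn era over, bio-based drivers return void and are wired up through block_device_operations instead of poking the request queue, which is why bch_cached_dev_request_init() below loses its queue assignments. A hedged sketch of the wiring, assuming an ops table named along the lines bcache uses:

    static const struct block_device_operations bcache_cached_ops = {
        .submit_bio = cached_dev_submit_bio,
        .owner      = THIS_MODULE,
    };
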
@@ -1233,41 +1235,13 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
if (dc->io_disable)
return -EIO;
-
- return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
-}
-
-static int cached_dev_congested(void *data, int bits)
-{
- struct bcache_device *d = data;
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
- struct request_queue *q = bdev_get_queue(dc->bdev);
- int ret = 0;
-
- if (bdi_congested(q->backing_dev_info, bits))
- return 1;
-
- if (cached_dev_get(dc)) {
- unsigned int i;
- struct cache *ca;
-
- for_each_cache(ca, d->c, i) {
- q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
-
- cached_dev_put(dc);
- }
-
- return ret;
+ if (!dc->bdev->bd_disk->fops->ioctl)
+ return -ENOTTY;
+ return dc->bdev->bd_disk->fops->ioctl(dc->bdev, mode, cmd, arg);
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
- struct gendisk *g = dc->disk.disk;
-
- g->queue->make_request_fn = cached_dev_make_request;
- g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
}
@@ -1301,22 +1275,19 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
-static blk_qc_t flash_dev_make_request(struct request_queue *q,
- struct bio *bio)
+void flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
- struct bcache_device *d = bio->bi_disk->private_data;
+ struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
- generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
-
- s = search_alloc(bio, d);
+ s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
cl = &s->cl;
bio = &s->bio.bio;
@@ -1324,13 +1295,12 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
if (!bio->bi_iter.bi_size) {
/*
- * can't call bch_journal_meta from under
- * generic_make_request
+ * can't call bch_journal_meta from under submit_bio_noacct
*/
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
- return BLK_QC_T_NONE;
+ return;
} else if (bio_data_dir(bio)) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1346,7 +1316,6 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
}
continue_at(cl, search_free, NULL);
- return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1355,28 +1324,8 @@ static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
return -ENOTTY;
}
-static int flash_dev_congested(void *data, int bits)
-{
- struct bcache_device *d = data;
- struct request_queue *q;
- struct cache *ca;
- unsigned int i;
- int ret = 0;
-
- for_each_cache(ca, d->c, i) {
- q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
- }
-
- return ret;
-}
-
void bch_flash_dev_request_init(struct bcache_device *d)
{
- struct gendisk *g = d->disk;
-
- g->queue->make_request_fn = flash_dev_make_request;
- g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
}