path: root/drivers/md/bcache/io.c
author     Kent Overstreet <kmo@daterainc.com>  2013-09-24 16:26:05 -0700
committer  Kent Overstreet <kmo@daterainc.com>  2013-11-23 22:33:51 -0800
commit     458b76ed2f9517becb74dcc8eedd70d3068ea6e4 (patch)
tree       b01b2150b197e0759b3ba59c0f9367d4477ebb8c /drivers/md/bcache/io.c
parent     bio-integrity: Convert to bvec_iter (diff)
block: Kill bio_segments()/bi_vcnt usage
When we start sharing biovecs, keeping bi_vcnt accurate for splits is going
to be error prone - and unnecessary, if we refactor some code. So
bio_segments() has to go - but most of the existing users just needed to know
if the bio had multiple segments, which is easier - add a
bio_multiple_segments() for them.

(Two of the current uses of bio_segments() are going to go away in a couple
patches, but the current implementation of bio_segments() is unsafe as soon
as we start doing driver conversions for immutable biovecs - so implement a
dumb version for bisectability, it'll go away in a couple patches)

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Neil Brown <neilb@suse.de>
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
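As a rough illustration of the two helpers the message describes (their real
definitions live in include/linux/bio.h and are not part of this file's diff,
so the names and bodies below are a sketch of the idea, not the patch itself):

#include <linux/bio.h>

/*
 * Sketch of the "dumb" bio_segments(): count segments by walking the bio
 * with the immutable-biovec iterator instead of trusting bi_vcnt, which
 * stops being meaningful once biovecs are shared across splits.
 */
static inline unsigned sketch_bio_segments(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned segs = 0;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Sketch of bio_multiple_segments(): callers that only ask "is there more
 * than one segment?" can compare the bytes remaining in the bio against
 * the length of its first segment, with no counting required.
 */
static inline bool sketch_bio_multiple_segments(struct bio *bio)
{
	return bio->bi_iter.bi_size != bio_iovec(bio).bv_len;
}

With a helper like the second one, a driver check such as
bio_segments(bio) > 1 becomes a cheap bio_multiple_segments(bio) call, which
is why most callers never needed an exact segment count in the first place.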
Diffstat (limited to 'drivers/md/bcache/io.c')
-rw-r--r--  drivers/md/bcache/io.c  53
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9b5b6a41a9b6..6e04f3bb0286 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,7 +24,8 @@ static void bch_generic_make_request_hack(struct bio *bio)
 	if (bio->bi_iter.bi_idx) {
 		struct bio_vec bv;
 		struct bvec_iter iter;
-		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
+		unsigned segs = bio_segments(bio);
+		struct bio *clone = bio_alloc(GFP_NOIO, segs);
 
 		bio_for_each_segment(bv, bio, iter)
 			clone->bi_io_vec[clone->bi_vcnt++] = bv;
@@ -32,7 +33,7 @@ static void bch_generic_make_request_hack(struct bio *bio)
 		clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 		clone->bi_bdev = bio->bi_bdev;
 		clone->bi_rw = bio->bi_rw;
-		clone->bi_vcnt = bio_segments(bio);
+		clone->bi_vcnt = segs;
 		clone->bi_iter.bi_size = bio->bi_iter.bi_size;
 
 		clone->bi_private = bio;
@@ -133,40 +134,32 @@ out:
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-	unsigned ret = bio_sectors(bio);
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-				      queue_max_segments(q));
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned ret = 0, seg = 0;
 
 	if (bio->bi_rw & REQ_DISCARD)
-		return min(ret, q->limits.max_discard_sectors);
-
-	if (bio_segments(bio) > max_segments ||
-	    q->merge_bvec_fn) {
-		struct bio_vec bv;
-		struct bvec_iter iter;
-		unsigned seg = 0;
-
-		ret = 0;
+		return min(bio_sectors(bio), q->limits.max_discard_sectors);
 
-		bio_for_each_segment(bv, bio, iter) {
-			struct bvec_merge_data bvm = {
-				.bi_bdev = bio->bi_bdev,
-				.bi_sector = bio->bi_iter.bi_sector,
-				.bi_size = ret << 9,
-				.bi_rw = bio->bi_rw,
-			};
-
-			if (seg == max_segments)
-				break;
+	bio_for_each_segment(bv, bio, iter) {
+		struct bvec_merge_data bvm = {
+			.bi_bdev = bio->bi_bdev,
+			.bi_sector = bio->bi_iter.bi_sector,
+			.bi_size = ret << 9,
+			.bi_rw = bio->bi_rw,
+		};
+
+		if (seg == min_t(unsigned, BIO_MAX_PAGES,
+				 queue_max_segments(q)))
+			break;
 
-			if (q->merge_bvec_fn &&
-			    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-				break;
+		if (q->merge_bvec_fn &&
+		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+			break;
 
-			seg++;
-			ret += bv.bv_len >> 9;
-		}
+		seg++;
+		ret += bv.bv_len >> 9;
 	}
 
 	ret = min(ret, queue_max_sectors(q));
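
For context on the loop in the new bch_bio_max_sectors() above: q->merge_bvec_fn
is the optional driver hook that reports how many bytes of a proposed bio_vec it
would accept at the position described by the bvec_merge_data, and a return value
smaller than bv.bv_len means the bio cannot grow any further. A hypothetical
driver-side implementation, written only to illustrate that contract (the name
and the 64 KiB limit are made up, not taken from this patch or any real driver),
might look like:

#include <linux/blkdev.h>

/*
 * Hypothetical merge_bvec_fn: @bvm describes the bio being built so far
 * (device, start sector, bytes already accepted); the return value is how
 * many bytes of @bv this driver would take on top of that. Returning less
 * than bv->bv_len tells callers such as bch_bio_max_sectors() to stop here.
 */
static int example_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *bv)
{
	unsigned int max_bytes = 64 << 10;	/* made-up per-bio limit */

	if (bvm->bi_size >= max_bytes)
		return 0;

	return min(bv->bv_len, max_bytes - bvm->bi_size);
}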