author	NeilBrown <neilb@suse.com>	2017-04-05 14:05:51 +1000
committer	Shaohua Li <shli@fb.com>	2017-04-11 10:16:50 -0700
commit	dd7a8f5dee81ffb1794df1103f07c63fd4f1d766
tree	75548d262b86202ad305a47491522c499098c783	/drivers/md/raid5.c
parent	md/raid10: simplify handle_read_error()
md/raid5: make chunk_aligned_read() split bios more cleanly.
chunk_aligned_read() currently uses fs_bio_set - which is meant for filesystems to use - and loops if multiple splits are needed, which is not best practice.
As this is only used for READ requests, not writes, it is unlikely to cause a problem. However, it is best to be consistent in how we split bios, and to follow the pattern used in raid1/raid10.

So create a private bioset, bio_split, and use it to perform a single split, submitting the remainder to generic_make_request() for later processing.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
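[Editor's note] For reference, here is the patched chunk_aligned_read() assembled from the first hunk of the diff below, with explanatory comments added; the comments are editorial and not part of the kernel source.

static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
	struct bio *split;
	sector_t sector = raid_bio->bi_iter.bi_sector;
	unsigned chunk_sects = mddev->chunk_sectors;
	/* Sectors remaining before the next chunk boundary. */
	unsigned sectors = chunk_sects - (sector & (chunk_sects-1));

	if (sectors < bio_sectors(raid_bio)) {
		struct r5conf *conf = mddev->private;
		/* Split exactly once, from the array's private bioset
		 * rather than fs_bio_set, as raid1/raid10 do. */
		split = bio_split(raid_bio, sectors, GFP_NOIO, conf->bio_split);
		bio_chain(split, raid_bio);
		/* Resubmit the remainder; if it still spans more than one
		 * chunk it will be split again when it re-enters md. */
		generic_make_request(raid_bio);
		raid_bio = split;
	}

	if (!raid5_read_one_chunk(mddev, raid_bio))
		return raid_bio;

	return NULL;
}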
Diffstat (limited to 'drivers/md/raid5.c')
-rw-r--r--	drivers/md/raid5.c	33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f3692ff4262b..356cd9c7c753 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5246,24 +5246,20 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
 {
 	struct bio *split;
+	sector_t sector = raid_bio->bi_iter.bi_sector;
+	unsigned chunk_sects = mddev->chunk_sectors;
+	unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
 
-	do {
-		sector_t sector = raid_bio->bi_iter.bi_sector;
-		unsigned chunk_sects = mddev->chunk_sectors;
-		unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
-
-		if (sectors < bio_sectors(raid_bio)) {
-			split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
-			bio_chain(split, raid_bio);
-		} else
-			split = raid_bio;
+	if (sectors < bio_sectors(raid_bio)) {
+		struct r5conf *conf = mddev->private;
+		split = bio_split(raid_bio, sectors, GFP_NOIO, conf->bio_split);
+		bio_chain(split, raid_bio);
+		generic_make_request(raid_bio);
+		raid_bio = split;
+	}
 
-		if (!raid5_read_one_chunk(mddev, split)) {
-			if (split != raid_bio)
-				generic_make_request(raid_bio);
-			return split;
-		}
-	} while (split != raid_bio);
+	if (!raid5_read_one_chunk(mddev, raid_bio))
+		return raid_bio;
 
 	return NULL;
 }
@@ -6747,6 +6743,8 @@ static void free_conf(struct r5conf *conf)
 		if (conf->disks[i].extra_page)
 			put_page(conf->disks[i].extra_page);
 	kfree(conf->disks);
+	if (conf->bio_split)
+		bioset_free(conf->bio_split);
 	kfree(conf->stripe_hashtbl);
 	kfree(conf->pending_data);
 	kfree(conf);
@@ -6922,6 +6920,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 			goto abort;
 	}
 
+	conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	if (!conf->bio_split)
+		goto abort;
 	conf->mddev = mddev;
 
 	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)