Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--  drivers/md/raid10.c | 102
1 file changed, 44 insertions(+), 58 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 77b562d18a90..59d4daa5f4c7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1133,7 +1133,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
- generic_make_request(bio);
+ if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+ !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ /* Just ignore it */
+ bio_endio(bio, 0);
+ else
+ generic_make_request(bio);
bio = next;
}
kfree(plug);
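
Note: this first hunk completes the plugged-write path's discard handling; a pending discard bio aimed at a queue that cannot discard is completed as a silent success instead of being submitted. A minimal standalone sketch of the same guard, assuming the pre-3.14 block API used throughout this patch (bi_rw, REQ_DISCARD, two-argument bio_endio()):

	/*
	 * Sketch only -- not part of the patch.  Complete a discard bio
	 * as a no-op success when the underlying queue cannot discard,
	 * otherwise pass it down the stack as usual.
	 */
	static void submit_or_drop_discard(struct bio *bio)
	{
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			     !blk_queue_discard(q)))
			bio_endio(bio, 0);	/* pretend it succeeded */
		else
			generic_make_request(bio);
	}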
@@ -1169,14 +1174,13 @@ static void make_request(struct mddev *mddev, struct bio * bio)
/* If this request crosses a chunk boundary, we need to
* split it. This will only happen for 1 PAGE (or less) requests.
*/
- if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+ if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
> chunk_sects
&& (conf->geo.near_copies < conf->geo.raid_disks
|| conf->prev.near_copies < conf->prev.raid_disks))) {
struct bio_pair *bp;
/* Sanity check -- queue functions should prevent this happening */
- if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
- bio->bi_idx != 0)
+ if (bio_segments(bio) > 1)
goto bad_map;
/* This is a one page bio that upper layers
* refuse to split for us, so we need to split it.
@@ -1209,7 +1213,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
bad_map:
printk("md/raid10:%s: make_request bug: can't convert block across chunks"
" or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
- (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+ (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
bio_io_error(bio);
return;
@@ -1224,7 +1228,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
*/
wait_barrier(conf);
- sectors = bio->bi_size >> 9;
+ sectors = bio_sectors(bio);
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio->bi_sector < conf->reshape_progress &&
bio->bi_sector + sectors > conf->reshape_progress) {
@@ -1326,8 +1330,7 @@ read_again:
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
- r10_bio->sectors = ((bio->bi_size >> 9)
- - sectors_handled);
+ r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->state = 0;
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1569,7 +1572,7 @@ retry_write:
* after checking if we need to go around again.
*/
- if (sectors_handled < (bio->bi_size >> 9)) {
+ if (sectors_handled < bio_sectors(bio)) {
one_write_done(r10_bio);
/* We need another r10_bio. It has already been counted
* in bio->bi_phys_segments.
@@ -1577,7 +1580,7 @@ retry_write:
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
r10_bio->master_bio = bio;
- r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+ r10_bio->sectors = bio_sectors(bio) - sectors_handled;
r10_bio->mddev = mddev;
r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -2079,13 +2082,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* First we need to fixup bv_offset, bv_len and
* bi_vecs, as the read request might have corrupted these
*/
+ bio_reset(tbio);
+
tbio->bi_vcnt = vcnt;
tbio->bi_size = r10_bio->sectors << 9;
- tbio->bi_idx = 0;
- tbio->bi_phys_segments = 0;
- tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
- tbio->bi_flags |= 1 << BIO_UPTODATE;
- tbio->bi_next = NULL;
tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
tbio->bi_sector = r10_bio->devs[i].addr;
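
Note: the open-coded reinitialisation of tbio that this hunk deletes is exactly what bio_reset() provides. From memory of this kernel generation's fs/bio.c (treat the body as a sketch, not the exact tree state), the helper looks roughly like this; it re-sets BIO_UPTODATE itself, which is why the hand-rolled flag fixups can go:

	void bio_reset(struct bio *bio)
	{
		unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

		__bio_free(bio);

		/* Zero everything below the persistent fields (bi_pool
		 * etc.) and mark the bio up to date again. */
		memset(bio, 0, BIO_RESET_BYTES);
		bio->bi_flags = flags | (1 << BIO_UPTODATE);
	}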
@@ -2103,7 +2103,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[i].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
- md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+ md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -2128,7 +2128,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[i].devnum;
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].replacement->bdev,
- tbio->bi_size >> 9);
+ bio_sectors(tbio));
generic_make_request(tbio);
}
@@ -2254,13 +2254,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
wbio2 = r10_bio->devs[1].repl_bio;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+ md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
generic_make_request(wbio);
}
if (wbio2 && wbio2->bi_end_io) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
- wbio2->bi_size >> 9);
+ bio_sectors(wbio2));
generic_make_request(wbio2);
}
}
@@ -2531,25 +2531,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
}
}
-static void bi_complete(struct bio *bio, int error)
-{
- complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
- struct completion event;
- rw |= REQ_SYNC;
-
- init_completion(&event);
- bio->bi_private = &event;
- bio->bi_end_io = bi_complete;
- submit_bio(rw, bio);
- wait_for_completion(&event);
-
- return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
static int narrow_write_error(struct r10bio *r10_bio, int i)
{
struct bio *bio = r10_bio->master_bio;
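
Note: the bi_complete()/submit_bio_wait() pair deleted above was raid10's private copy of the classic submit-and-wait pattern; dropping it presumably relies on an equivalent submit_bio_wait() now living in the generic block layer. For reference, the pattern itself, as grounded in the removed code (era API, two-argument bi_end_io):

	/* Point bi_private at an on-stack completion, complete it from
	 * bi_end_io, and sleep until the bio finishes. */
	struct completion event;

	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;	/* calls complete(bio->bi_private) */
	submit_bio(rw | REQ_SYNC, bio);
	wait_for_completion(&event);
	/* success is then the BIO_UPTODATE bit: */
	uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);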
@@ -2690,8 +2671,7 @@ read_more:
r10_bio = mempool_alloc(conf->r10bio_pool,
GFP_NOIO);
r10_bio->master_bio = mbio;
- r10_bio->sectors = (mbio->bi_size >> 9)
- - sectors_handled;
+ r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
r10_bio->state = 0;
set_bit(R10BIO_ReadError,
&r10_bio->state);
@@ -2913,6 +2893,22 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (init_resync(conf))
return 0;
+ /*
+ * Allow skipping a full rebuild for incremental assembly
+ * of a clean array, like RAID1 does.
+ */
+ if (mddev->bitmap == NULL &&
+ mddev->recovery_cp == MaxSector &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+ conf->fullsync == 0) {
+ *skipped = 1;
+ max_sector = mddev->dev_sectors;
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ max_sector = mddev->resync_max_sectors;
+ return max_sector - sector_nr;
+ }
+
skipped:
max_sector = mddev->dev_sectors;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -3112,6 +3108,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
}
}
bio = r10_bio->devs[0].bio;
+ bio_reset(bio);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
@@ -3136,6 +3133,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
rdev = mirror->rdev;
if (!test_bit(In_sync, &rdev->flags)) {
bio = r10_bio->devs[1].bio;
+ bio_reset(bio);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
@@ -3164,6 +3162,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (rdev == NULL || bio == NULL ||
test_bit(Faulty, &rdev->flags))
break;
+ bio_reset(bio);
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
@@ -3262,7 +3261,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[i].repl_bio->bi_end_io = NULL;
bio = r10_bio->devs[i].bio;
- bio->bi_end_io = NULL;
+ bio_reset(bio);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
if (conf->mirrors[d].rdev == NULL ||
test_bit(Faulty, &conf->mirrors[d].rdev->flags))
@@ -3299,6 +3298,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* Need to set up for writing to the replacement */
bio = r10_bio->devs[i].repl_bio;
+ bio_reset(bio);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
sector = r10_bio->devs[i].addr;
@@ -3332,17 +3332,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
}
}
- for (bio = biolist; bio ; bio=bio->bi_next) {
-
- bio->bi_flags &= ~(BIO_POOL_MASK - 1);
- if (bio->bi_end_io)
- bio->bi_flags |= 1 << BIO_UPTODATE;
- bio->bi_vcnt = 0;
- bio->bi_idx = 0;
- bio->bi_phys_segments = 0;
- bio->bi_size = 0;
- }
-
nr_sectors = 0;
if (sector_nr + max_sync < max_sector)
max_sector = sector_nr + max_sync;
@@ -3810,6 +3799,7 @@ static int stop(struct mddev *mddev)
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
+ safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf);
mddev->private = NULL;
@@ -4389,7 +4379,6 @@ read_more:
read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
read_bio->bi_vcnt = 0;
- read_bio->bi_idx = 0;
read_bio->bi_size = 0;
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4413,17 +4402,14 @@ read_more:
}
if (!rdev2 || test_bit(Faulty, &rdev2->flags))
continue;
+
+ bio_reset(b);
b->bi_bdev = rdev2->bdev;
b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
b->bi_private = r10_bio;
b->bi_end_io = end_reshape_write;
b->bi_rw = WRITE;
- b->bi_flags &= ~(BIO_POOL_MASK - 1);
- b->bi_flags |= 1 << BIO_UPTODATE;
b->bi_next = blist;
- b->bi_vcnt = 0;
- b->bi_idx = 0;
- b->bi_size = 0;
blist = b;
}
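
Note: the accessors this patch converts to were thin wrappers in this kernel generation (quoted from memory of include/linux/bio.h; treat as a sketch rather than the exact tree state):

	#define bio_sectors(bio)	((bio)->bi_size >> 9)
	#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)

So the bio_sectors()/bio_segments() conversions are behaviour-neutral cleanups; their value is that callers stop reaching into bi_size, bi_vcnt and bi_idx directly, which pays off once these fields move behind bio->bi_iter in later kernels.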