Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c    43
-rw-r--r--  drivers/md/dm-io.c        4
-rw-r--r--  drivers/md/dm-ioctl.c     7
-rw-r--r--  drivers/md/dm-kcopyd.c    2
-rw-r--r--  drivers/md/dm.c          32
-rw-r--r--  drivers/md/md.c          34
-rw-r--r--  drivers/md/raid1.c        3
-rw-r--r--  drivers/md/raid10.c      19
8 files changed, 90 insertions(+), 54 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 35bda49796fb..bfefd079a955 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -60,6 +60,7 @@ struct dm_crypt_io {
};
struct dm_crypt_request {
+ struct convert_context *ctx;
struct scatterlist sg_in;
struct scatterlist sg_out;
};
@@ -335,6 +336,18 @@ static void crypt_convert_init(struct crypt_config *cc,
init_completion(&ctx->restart);
}
+static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
+ struct ablkcipher_request *req)
+{
+ return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+}
+
+static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+ struct dm_crypt_request *dmreq)
+{
+ return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+}
+
static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
struct ablkcipher_request *req)
@@ -345,10 +358,11 @@ static int crypt_convert_block(struct crypt_config *cc,
u8 *iv;
int r = 0;
- dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
+ dmreq = dmreq_of_req(cc, req);
iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
crypto_ablkcipher_alignmask(cc->tfm) + 1);
+ dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
bv_in->bv_offset + ctx->offset_in);
@@ -395,8 +409,9 @@ static void crypt_alloc_req(struct crypt_config *cc,
cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
ablkcipher_request_set_tfm(cc->req, cc->tfm);
ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP,
- kcryptd_async_done, ctx);
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ kcryptd_async_done,
+ dmreq_of_req(cc, cc->req));
}
/*
@@ -553,19 +568,22 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
static void crypt_dec_pending(struct dm_crypt_io *io)
{
struct crypt_config *cc = io->target->private;
+ struct bio *base_bio = io->base_bio;
+ struct dm_crypt_io *base_io = io->base_io;
+ int error = io->error;
if (!atomic_dec_and_test(&io->pending))
return;
- if (likely(!io->base_io))
- bio_endio(io->base_bio, io->error);
+ mempool_free(io, cc->io_pool);
+
+ if (likely(!base_io))
+ bio_endio(base_bio, error);
else {
- if (io->error && !io->base_io->error)
- io->base_io->error = io->error;
- crypt_dec_pending(io->base_io);
+ if (error && !base_io->error)
+ base_io->error = error;
+ crypt_dec_pending(base_io);
}
-
- mempool_free(io, cc->io_pool);
}
/*
@@ -821,7 +839,8 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
static void kcryptd_async_done(struct crypto_async_request *async_req,
int error)
{
- struct convert_context *ctx = async_req->data;
+ struct dm_crypt_request *dmreq = async_req->data;
+ struct convert_context *ctx = dmreq->ctx;
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
struct crypt_config *cc = io->target->private;
@@ -830,7 +849,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
return;
}
- mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);
+ mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
if (!atomic_dec_and_test(&ctx->pending))
return;
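
The dm-crypt hunks above repair the asynchronous completion path. Each ablkcipher request is allocated with a struct dm_crypt_request living cc->dmreq_start bytes past the request itself, and the completion callback data is now that per-request tail (which records its convert_context) rather than the shared context, so kcryptd_async_done() can recover and free exactly the request that completed. The crypt_dec_pending() reordering is a use-after-free fix: the needed fields are copied out before the dm_crypt_io is returned to its mempool. A minimal sketch of the offset arithmetic the new helpers encapsulate, with hypothetical names standing in for the dm-crypt types:

    #include <stddef.h>

    /* Hypothetical layout: one allocation holds a request header
     * followed, 'priv_start' bytes in, by driver-private data. */
    struct req_hdr   { int placeholder; };
    struct priv_tail { void *ctx; };

    static struct priv_tail *priv_of_req(struct req_hdr *req, size_t priv_start)
    {
        return (struct priv_tail *)((char *)req + priv_start);
    }

    static struct req_hdr *req_of_priv(struct priv_tail *priv, size_t priv_start)
    {
        return (struct req_hdr *)((char *)priv - priv_start);
    }

The two conversions are exact inverses, so whichever pointer a callback receives, the other can be recomputed without any lookup structure.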
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index a34338567a2a..36e2b5e46a6b 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -292,6 +292,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
(PAGE_SIZE >> SECTOR_SHIFT));
num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
num_bvecs);
+ if (unlikely(num_bvecs > BIO_MAX_PAGES))
+ num_bvecs = BIO_MAX_PAGES;
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
@@ -328,7 +330,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
struct dpages old_pages = *dp;
if (sync)
- rw |= (1 << BIO_RW_SYNC);
+ rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
/*
* For multiple regions we need to be careful to rewind
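
Two independent dm-io fixes: the bvec estimate is clamped so bio_alloc_bioset() is never asked for more vectors than one bio can hold, and the retired BIO_RW_SYNC flag is replaced by the pair BIO_RW_SYNCIO | BIO_RW_UNPLUG after the block layer split the sync and unplug semantics into separate bits (dm-kcopyd.c and md.c below get the same substitution). A sketch of the sizing logic under assumed constants (the real values come from the kernel headers and from bio_get_nr_vecs() on the target queue):

    enum {
        SECTOR_SHIFT_  = 9,
        PAGE_SIZE_     = 4096,  /* assumption: 4 KiB pages */
        BIO_MAX_PAGES_ = 256    /* assumption: per-bio vector limit */
    };

    static unsigned num_bvecs_for(unsigned remaining_sectors, unsigned queue_limit)
    {
        unsigned per_page = PAGE_SIZE_ >> SECTOR_SHIFT_;
        unsigned pages    = (remaining_sectors + per_page - 1) / per_page;
        unsigned n        = 1 + (pages < queue_limit ? pages : queue_limit);

        /* the added clamp: one bio holds at most BIO_MAX_PAGES vectors */
        return n > BIO_MAX_PAGES_ ? BIO_MAX_PAGES_ : n;
    }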
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 54d0588fc1f6..f01096549a93 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -704,7 +704,8 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
char *new_name = (char *) param + param->data_start;
if (new_name < param->data ||
- invalid_str(new_name, (void *) param + param_size)) {
+ invalid_str(new_name, (void *) param + param_size) ||
+ strlen(new_name) > DM_NAME_LEN - 1) {
DMWARN("Invalid new logical volume name supplied.");
return -EINVAL;
}
@@ -1063,7 +1064,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
r = populate_table(t, param, param_size);
if (r) {
- dm_table_put(t);
+ dm_table_destroy(t);
goto out;
}
@@ -1071,7 +1072,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
hc = dm_get_mdptr(md);
if (!hc || hc->md != md) {
DMWARN("device has been removed from the dev hash table.");
- dm_table_put(t);
+ dm_table_destroy(t);
up_write(&_hash_lock);
r = -ENXIO;
goto out;
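
The dm-ioctl hunks close two holes: dev_rename() now also rejects a new name that would not fit, terminating NUL included, in the fixed DM_NAME_LEN buffer the hash cell copies it into, and the table_load() error paths call dm_table_destroy() so a half-built table is actually torn down instead of merely having a reference dropped. A sketch of the added bound (DM_NAME_LEN is 128 in the dm-ioctl ABI):

    #include <string.h>

    #define DM_NAME_LEN 128

    static int new_name_fits(const char *new_name)
    {
        /* leave room for the terminating NUL */
        return strlen(new_name) <= DM_NAME_LEN - 1;
    }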
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 3073618269ea..0a225da21272 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -344,7 +344,7 @@ static int run_io_job(struct kcopyd_job *job)
{
int r;
struct dm_io_request io_req = {
- .bi_rw = job->rw | (1 << BIO_RW_SYNC),
+ .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
.mem.offset = job->offset,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 51ba1db4b3e7..8d40f27cce89 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -525,9 +525,12 @@ static int __noflush_suspending(struct mapped_device *md)
static void dec_pending(struct dm_io *io, int error)
{
unsigned long flags;
+ int io_error;
+ struct bio *bio;
+ struct mapped_device *md = io->md;
/* Push-back supersedes any I/O errors */
- if (error && !(io->error > 0 && __noflush_suspending(io->md)))
+ if (error && !(io->error > 0 && __noflush_suspending(md)))
io->error = error;
if (atomic_dec_and_test(&io->io_count)) {
@@ -537,24 +540,27 @@ static void dec_pending(struct dm_io *io, int error)
* This must be handled before the sleeper on
* suspend queue merges the pushback list.
*/
- spin_lock_irqsave(&io->md->pushback_lock, flags);
- if (__noflush_suspending(io->md))
- bio_list_add(&io->md->pushback, io->bio);
+ spin_lock_irqsave(&md->pushback_lock, flags);
+ if (__noflush_suspending(md))
+ bio_list_add(&md->pushback, io->bio);
else
/* noflush suspend was interrupted. */
io->error = -EIO;
- spin_unlock_irqrestore(&io->md->pushback_lock, flags);
+ spin_unlock_irqrestore(&md->pushback_lock, flags);
}
end_io_acct(io);
- if (io->error != DM_ENDIO_REQUEUE) {
- trace_block_bio_complete(io->md->queue, io->bio);
- bio_endio(io->bio, io->error);
- }
- free_io(io->md, io);
+ io_error = io->error;
+ bio = io->bio;
+
+ free_io(md, io);
+
+ if (io_error != DM_ENDIO_REQUEUE) {
+ trace_block_bio_complete(md->queue, bio);
+ bio_endio(bio, io_error);
+ }
}
}
@@ -562,6 +568,7 @@ static void clone_endio(struct bio *bio, int error)
{
int r = 0;
struct dm_target_io *tio = bio->bi_private;
+ struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
@@ -585,15 +592,14 @@ static void clone_endio(struct bio *bio, int error)
}
}
- dec_pending(tio->io, error);
-
/*
* Store md for cleanup instead of tio which is about to get freed.
*/
bio->bi_private = md->bs;
- bio_put(bio);
free_tio(md, tio);
+ bio_put(bio);
+ dec_pending(io, error);
}
static sector_t max_io_len(struct mapped_device *md,
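
Both dm.c hunks fix the same ordering bug: once bio_endio() (or dec_pending()) runs, the originator may tear the mapped device down, so nothing reached through the per-I/O tracking structures may be dereferenced afterwards. The cure is to snapshot every field needed later, free the tracking structure, and only then complete the bio. A generic sketch of the pattern, with hypothetical types:

    struct bio;  /* opaque for this sketch */

    struct io_track {
        struct bio *bio;
        int error;
    };

    static void finish_io(struct io_track *io,
                          void (*endio)(struct bio *, int),
                          void (*free_track)(struct io_track *))
    {
        struct bio *bio = io->bio;  /* snapshot before the free */
        int error = io->error;

        free_track(io);             /* 'io' is dead past this point */
        endio(bio, error);          /* may trigger device teardown */
    }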
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4495104f6c9f..a307f87eb90e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -214,12 +214,7 @@ static inline mddev_t *mddev_get(mddev_t *mddev)
return mddev;
}
-static void mddev_delayed_delete(struct work_struct *ws)
-{
- mddev_t *mddev = container_of(ws, mddev_t, del_work);
- kobject_del(&mddev->kobj);
- kobject_put(&mddev->kobj);
-}
+static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(mddev_t *mddev)
{
@@ -474,7 +469,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
* causes ENOTSUPP, we allocate a spare bio...
*/
struct bio *bio = bio_alloc(GFP_NOIO, 1);
- int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
+ int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
bio->bi_bdev = rdev->bdev;
bio->bi_sector = sector;
@@ -531,7 +526,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
struct completion event;
int ret;
- rw |= (1 << BIO_RW_SYNC);
+ rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
bio->bi_bdev = bdev;
bio->bi_sector = sector;
@@ -3542,6 +3537,21 @@ static struct kobj_type md_ktype = {
int mdp_major = 0;
+static void mddev_delayed_delete(struct work_struct *ws)
+{
+ mddev_t *mddev = container_of(ws, mddev_t, del_work);
+
+ if (mddev->private == &md_redundancy_group) {
+ sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->sysfs_action)
+ sysfs_put(mddev->sysfs_action);
+ mddev->sysfs_action = NULL;
+ mddev->private = NULL;
+ }
+ kobject_del(&mddev->kobj);
+ kobject_put(&mddev->kobj);
+}
+
static int md_alloc(dev_t dev, char *name)
{
static DEFINE_MUTEX(disks_mutex);
@@ -4033,13 +4043,9 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
- if (mddev->pers->sync_request) {
- sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
- if (mddev->sysfs_action)
- sysfs_put(mddev->sysfs_action);
- mddev->sysfs_action = NULL;
- }
module_put(mddev->pers->owner);
+ if (mddev->pers->sync_request)
+ mddev->private = &md_redundancy_group;
mddev->pers = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent(mddev->sysfs_state);
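
The md.c change resolves a shutdown deadlock: do_md_stop() runs under the reconfig mutex, and removing the md_redundancy_group attributes from that context can deadlock against a sysfs access to those same attributes. The removal is therefore deferred to mddev_delayed_delete(), which runs from a workqueue, with mddev->private pointing at md_redundancy_group as the marker that the group still needs removing; the forward declaration at the top is needed because the moved body now refers to md_redundancy_group, defined later in the file. A kernel-style sketch of the deferral pattern, with a hypothetical object:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>
    #include <linux/workqueue.h>

    static const struct attribute_group my_obj_attr_group;  /* assumed group */

    struct my_obj {
        struct kobject kobj;
        atomic_t refcount;
        struct work_struct del_work;
    };

    static void my_obj_delayed_delete(struct work_struct *ws)
    {
        struct my_obj *obj = container_of(ws, struct my_obj, del_work);

        /* safe: we are on a workqueue, not inside a sysfs handler */
        sysfs_remove_group(&obj->kobj, &my_obj_attr_group);
        kobject_del(&obj->kobj);
        kobject_put(&obj->kobj);
    }

    static void my_obj_put(struct my_obj *obj)
    {
        if (atomic_dec_and_test(&obj->refcount)) {
            INIT_WORK(&obj->del_work, my_obj_delayed_delete);
            schedule_work(&obj->del_work);
        }
    }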
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 01e3cffd03b8..e2466425d9ca 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1237,8 +1237,9 @@ static void end_sync_write(struct bio *bio, int error)
update_head_pos(mirror, r1_bio);
if (atomic_dec_and_test(&r1_bio->remaining)) {
- md_done_sync(mddev, r1_bio->sectors, uptodate);
+ sector_t s = r1_bio->sectors;
put_buf(r1_bio);
+ md_done_sync(mddev, s, uptodate);
}
}
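
The raid1 fix is the same snapshot-before-release pattern as the dm.c sketch above: put_buf() can return the r1_bio to its mempool, so r1_bio->sectors is copied into a local first and md_done_sync() is called with the copy.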
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6736d6dff981..7301631abe04 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1236,6 +1236,7 @@ static void end_sync_read(struct bio *bio, int error)
/* for reconstruct, we always reschedule after a read.
* for resync, only after all reads
*/
+ rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
atomic_dec_and_test(&r10_bio->remaining)) {
/* we have read all the blocks,
@@ -1243,7 +1244,6 @@ static void end_sync_read(struct bio *bio, int error)
*/
reschedule_retry(r10_bio);
}
- rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
}
static void end_sync_write(struct bio *bio, int error)
@@ -1264,11 +1264,13 @@ static void end_sync_write(struct bio *bio, int error)
update_head_pos(i, r10_bio);
+ rdev_dec_pending(conf->mirrors[d].rdev, mddev);
while (atomic_dec_and_test(&r10_bio->remaining)) {
if (r10_bio->master_bio == NULL) {
/* the primary of several recovery bios */
- md_done_sync(mddev, r10_bio->sectors, 1);
+ sector_t s = r10_bio->sectors;
put_buf(r10_bio);
+ md_done_sync(mddev, s, 1);
break;
} else {
r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
@@ -1276,7 +1278,6 @@ static void end_sync_write(struct bio *bio, int error)
r10_bio = r10_bio2;
}
}
- rdev_dec_pending(conf->mirrors[d].rdev, mddev);
}
/*
@@ -1749,8 +1750,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (!go_faster && conf->nr_waiting)
msleep_interruptible(1000);
- bitmap_cond_end_sync(mddev->bitmap, sector_nr);
-
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
* have bi_end_io, bi_sector, bi_bdev set,
@@ -1886,6 +1885,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
/* resync. Schedule a read for every block at this virt offset */
int count = 0;
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
if (!bitmap_start_sync(mddev->bitmap, sector_nr,
&sync_blocks, mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
@@ -2010,13 +2011,13 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
/* There is nowhere to write, so all non-sync
* drives must be failed, so try the next chunk...
*/
- {
- sector_t sec = max_sector - sector_nr;
- sectors_skipped += sec;
+ if (sector_nr + max_sync < max_sector)
+ max_sector = sector_nr + max_sync;
+
+ sectors_skipped += (max_sector - sector_nr);
chunks_skipped ++;
sector_nr = max_sector;
goto skipped;
- }
}
static int run(mddev_t *mddev)
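
The raid10 hunks apply the same discipline and add two resync accounting fixes. In end_sync_read()/end_sync_write() the rdev reference is dropped before reschedule_retry()/md_done_sync(), because those calls can be the final step of the resync, after which the array may be stopped and conf must no longer be dereferenced; end_sync_write() likewise copies r10_bio->sectors before put_buf(). bitmap_cond_end_sync() moves from the top of sync_request() into the resync-only branch so it is no longer called during recovery, and the "nowhere to write" skip path now advances by at most max_sync sectors per pass instead of jumping straight to max_sector. A sketch of the bounded skip, with a hypothetical sector_t:

    typedef unsigned long long sector_t;  /* stand-in for the kernel type */

    /* Returns the new sector_nr: skip at most one max_sync-sized window
     * so the skipped-sector accounting advances in the same steps as
     * the rest of the resync loop. */
    static sector_t skip_window(sector_t sector_nr, sector_t max_sector,
                                sector_t max_sync, sector_t *sectors_skipped)
    {
        if (sector_nr + max_sync < max_sector)
            max_sector = sector_nr + max_sync;

        *sectors_skipped += max_sector - sector_nr;
        return max_sector;
    }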