Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c | 89
1 file changed, 34 insertions(+), 55 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c18fc2548518..b3c3c8b4cb42 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -476,8 +476,10 @@ static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
return -EAGAIN;
map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- return -EIO;
+ if (!map) {
+ ret = -EIO;
+ goto out;
+ }
do {
struct dm_target *tgt;
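The early "return -EIO" above left dm_blk_report_zones() without releasing the SRCU read-side reference taken by dm_get_live_table(); routing the failure through the existing "out" label keeps the get/put calls paired. A minimal sketch of the pattern this enforces (dm helpers as in the hunk, error handling trimmed):

	int srcu_idx, ret = 0;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	if (!map) {
		ret = -EIO;
		goto out;		/* we still hold the SRCU read lock here */
	}
	/* ... walk the live table ... */
out:
	dm_put_live_table(md, srcu_idx);	/* always released, success or failure */
	return ret;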
@@ -507,7 +509,6 @@ out:
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
- __acquires(md->io_barrier)
{
struct dm_target *tgt;
struct dm_table *map;
@@ -541,7 +542,6 @@ retry:
}
static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
- __releases(md->io_barrier)
{
dm_put_live_table(md, srcu_idx);
}
@@ -570,7 +570,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
}
}
- r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ if (!bdev->bd_disk->fops->ioctl)
+ r = -ENOTTY;
+ else
+ r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
dm_unprepare_ioctl(md, srcu_idx);
return r;
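The open-coded check replaces the call to __blkdev_driver_ioctl(), which is going away in the block layer. The helper was essentially a thin wrapper; roughly (a sketch from memory, hypothetical name, not the removed source verbatim):

	/* Roughly what the removed helper did: forward to the driver's
	 * block_device_operations ioctl, or report "no such ioctl". */
	static int blkdev_driver_ioctl_sketch(struct block_device *bdev, fmode_t mode,
					      unsigned int cmd, unsigned long arg)
	{
		if (!bdev->bd_disk->fops->ioctl)
			return -ENOTTY;
		return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
	}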
@@ -1037,15 +1040,18 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
sector_t max_len;
/*
- * Does the target need to split even further?
- * - q->limits.chunk_sectors reflects ti->max_io_len so
- * blk_max_size_offset() provides required splitting.
- * - blk_max_size_offset() also respects q->limits.max_sectors
+ * Does the target need to split IO even further?
+ * - varied (per target) IO splitting is a tenet of DM; this
+ * explains why stacked chunk_sectors based splitting via
+ * blk_max_size_offset() isn't possible here. So pass in
+ * ti->max_io_len to override stacked chunk_sectors.
*/
- max_len = blk_max_size_offset(ti->table->md->queue,
- target_offset);
- if (len > max_len)
- len = max_len;
+ if (ti->max_io_len) {
+ max_len = blk_max_size_offset(ti->table->md->queue,
+ target_offset, ti->max_io_len);
+ if (len > max_len)
+ len = max_len;
+ }
return len;
}
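With the extra argument, blk_max_size_offset() uses ti->max_io_len as the chunk size instead of the queue's stacked chunk_sectors, so each target can impose its own boundary; when a target sets no max_io_len the call is skipped entirely. The capping arithmetic is roughly the following (a simplified sketch assuming a power-of-two chunk; the real helper also honours q->limits.max_sectors and non-power-of-two chunks):

	/* Cap 'len' so an I/O starting at 'offset' does not cross the next
	 * chunk boundary (simplified sketch, not the kernel helper itself). */
	static sector_t cap_len_to_chunk(sector_t offset, sector_t len, unsigned int chunk)
	{
		sector_t to_boundary;

		if (!chunk)			/* no per-target limit: leave len alone */
			return len;
		to_boundary = chunk - (offset & (chunk - 1));
		return min_t(sector_t, len, to_boundary);
	}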
@@ -1196,11 +1202,9 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
* ->zero_page_range() is mandatory dax operation. If we are
* here, something is wrong.
*/
- dm_put_live_table(md, srcu_idx);
goto out;
}
ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
-
out:
dm_put_live_table(md, srcu_idx);
@@ -1273,8 +1277,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
break;
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
- trace_block_bio_remap(clone->bi_disk->queue, clone,
- bio_dev(io->orig_bio), sector);
+ trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
ret = submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
@@ -1419,18 +1422,12 @@ static int __send_empty_flush(struct clone_info *ci)
*/
bio_init(&flush_bio, NULL, 0);
flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ flush_bio.bi_disk = ci->io->md->disk;
+ bio_associate_blkg(&flush_bio);
+
ci->bio = &flush_bio;
ci->sector_count = 0;
- /*
- * Empty flush uses a statically initialized bio, as the base for
- * cloning. However, blkg association requires that a bdev is
- * associated with a gendisk, which doesn't happen until the bdev is
- * opened. So, blkg association is done at issue time of the flush
- * rather than when the device is created in alloc_dev().
- */
- bio_set_dev(ci->bio, ci->io->md->bdev);
-
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
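bio_set_dev() previously needed an opened struct block_device (md->bdev) both to pick the target disk and to associate the bio with a blkcg; with md->bdev gone, the on-stack flush bio points straight at the gendisk and the blkg association is done explicitly. A condensed sketch of the setup as used above:

	struct bio flush_bio;

	bio_init(&flush_bio, NULL, 0);			/* no data pages: an empty flush */
	flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
	flush_bio.bi_disk = ci->io->md->disk;		/* route via the mapped device's gendisk */
	bio_associate_blkg(&flush_bio);			/* associate with a blkg on that queue */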
@@ -1589,7 +1586,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error) {
error = __split_and_process_non_flush(&ci);
- if (current->bio_list && ci.sector_count && !error) {
+ if (ci.sector_count && !error) {
/*
* Remainder must be passed to submit_bio_noacct()
* so that it gets handled *after* bios already submitted
@@ -1610,12 +1607,12 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* (by eliminating DM's splitting and just using bio_split)
*/
part_stat_lock();
- __dm_part_stat_sub(&dm_disk(md)->part0,
+ __dm_part_stat_sub(dm_disk(md)->part0,
sectors[op_stat_group(bio_op(bio))], ci.sector_count);
part_stat_unlock();
bio_chain(b, bio);
- trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
+ trace_block_split(b, bio->bi_iter.bi_sector);
ret = submit_bio_noacct(bio);
break;
}
@@ -1747,11 +1744,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
cleanup_srcu_struct(&md->io_barrier);
- if (md->bdev) {
- bdput(md->bdev);
- md->bdev = NULL;
- }
-
mutex_destroy(&md->suspend_lock);
mutex_destroy(&md->type_lock);
mutex_destroy(&md->table_devices_lock);
@@ -1843,10 +1835,6 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->wq)
goto bad;
- md->bdev = bdget_disk(md->disk, 0);
- if (!md->bdev)
- goto bad;
-
dm_stats_init(&md->stats);
/* Populate the mapping, nobody knows we exist yet */
@@ -1971,8 +1959,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
- set_capacity(md->disk, size);
- bd_set_nr_sectors(md->bdev, size);
+ set_capacity_and_notify(md->disk, size);
dm_table_event_callback(t, event_callback, md);
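set_capacity_and_notify() folds the two removed calls into one: set_capacity() now also resizes the backing bdev inode (covering what bd_set_nr_sectors() did), and a resize uevent is emitted when the size actually changes. Roughly (a hedged sketch, not the helper's exact body):

	/* Simplified sketch of what the combined helper does. */
	sector_t old = get_capacity(md->disk);

	set_capacity(md->disk, size);		/* updates the gendisk and its part0 bdev size */
	if (size != old)
		kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);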
@@ -2255,7 +2242,7 @@ EXPORT_SYMBOL_GPL(dm_put);
static bool md_in_flight_bios(struct mapped_device *md)
{
int cpu;
- struct hd_struct *part = &dm_disk(md)->part0;
+ struct block_device *part = dm_disk(md)->part0;
long sum = 0;
for_each_possible_cpu(cpu) {
@@ -2390,27 +2377,19 @@ static int lock_fs(struct mapped_device *md)
{
int r;
- WARN_ON(md->frozen_sb);
-
- md->frozen_sb = freeze_bdev(md->bdev);
- if (IS_ERR(md->frozen_sb)) {
- r = PTR_ERR(md->frozen_sb);
- md->frozen_sb = NULL;
- return r;
- }
-
- set_bit(DMF_FROZEN, &md->flags);
+ WARN_ON(test_bit(DMF_FROZEN, &md->flags));
- return 0;
+ r = freeze_bdev(md->disk->part0);
+ if (!r)
+ set_bit(DMF_FROZEN, &md->flags);
+ return r;
}
static void unlock_fs(struct mapped_device *md)
{
if (!test_bit(DMF_FROZEN, &md->flags))
return;
-
- thaw_bdev(md->bdev, md->frozen_sb);
- md->frozen_sb = NULL;
+ thaw_bdev(md->disk->part0);
clear_bit(DMF_FROZEN, &md->flags);
}
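freeze_bdev() no longer hands back a struct super_block for the caller to stash; it returns 0 or a negative errno and tracks the frozen state internally, so dm only needs its DMF_FROZEN flag and thaw_bdev() takes just the block device. The resulting pairing, as a condensed sketch of the two functions above:

	/* Condensed sketch of the new freeze/thaw pairing. */
	if (!freeze_bdev(md->disk->part0))
		set_bit(DMF_FROZEN, &md->flags);
	/* ... suspended I/O window ... */
	if (test_and_clear_bit(DMF_FROZEN, &md->flags))
		thaw_bdev(md->disk->part0);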