Diffstat (limited to 'drivers/md/dm.c')
 drivers/md/dm.c | 41 ++++++++++++++++++++++++++++++++---------
 1 file changed, 32 insertions(+), 9 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d67c95ef8d7e..515e6af9bed2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io)
 				    true, duration, &io->stats_aux);
 
 	/* nudge anyone waiting on suspend queue */
-	if (unlikely(waitqueue_active(&md->wait)))
+	if (unlikely(wq_has_sleeper(&md->wait)))
 		wake_up(&md->wait);
 }
 
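For context (not part of the patch): waitqueue_active() is a lockless check, so the waker needs a full memory barrier between updating the condition the sleeper tests and inspecting the wait queue, otherwise the wake-up can be missed. wq_has_sleeper() bundles that smp_mb() with waitqueue_active(), which is why it is the safer drop-in here. A minimal sketch of the pattern, using hypothetical names:

/* Illustration only -- hypothetical waker/waiter pair, not DM code. */
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_done;

static void example_waker(void)
{
	example_done = true;
	/*
	 * wq_has_sleeper() issues smp_mb() before waitqueue_active(),
	 * ordering the store to example_done against the sleeper check.
	 */
	if (wq_has_sleeper(&example_wq))
		wake_up(&example_wq);
}

static void example_waiter(void)
{
	/* wait_event() supplies the matching ordering on the sleeper side */
	wait_event(example_wq, example_done);
}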
@@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 
 	__bio_clone_fast(clone, bio);
 
-	if (unlikely(bio_integrity(bio) != NULL)) {
+	if (bio_integrity(bio)) {
 		int r;
 
 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
@@ -1339,7 +1339,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
 	clone->bi_iter.bi_size = to_bytes(len);
 
-	if (unlikely(bio_integrity(bio) != NULL))
+	if (bio_integrity(bio))
 		bio_integrity_trim(clone);
 
 	return 0;
@@ -1588,6 +1588,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
 	ci->sector = bio->bi_iter.bi_sector;
 }
 
+#define __dm_part_stat_sub(part, field, subnd) \
+	(part_stat_get(part, field) -= (subnd))
+
 /*
  * Entry point to split a bio into clones and submit them to the targets.
  */
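A note on the helper introduced above (not part of the patch, and assuming the genhd.h part_stat helpers of this kernel generation): on SMP builds part_stat_get() resolves to the current CPU's per-cpu counter, so the open-coded subtraction is only safe while part_stat_lock() pins the CPU, which is exactly how the next hunk uses it. Roughly:

/*
 * Sketch of the intended usage and (assumed) SMP expansion:
 *
 *	part_stat_lock();                  // rcu_read_lock() + get_cpu()
 *	__dm_part_stat_sub(part, sectors[grp], n);
 *	//  => per_cpu_ptr((part)->dkstats, smp_processor_id())
 *	//         ->sectors[grp] -= n;
 *	part_stat_unlock();                // put_cpu() + rcu_read_unlock()
 */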
@@ -1642,7 +1645,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 				struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
 							  GFP_NOIO, &md->queue->bio_split);
 				ci.io->orig_bio = b;
+
+				/*
+				 * Adjust IO stats for each split, otherwise upon queue
+				 * reentry there will be redundant IO accounting.
+				 * NOTE: this is a stop-gap fix, a proper fix involves
+				 * significant refactoring of DM core's bio splitting
+				 * (by eliminating DM's splitting and just using bio_split)
+				 */
+				part_stat_lock();
+				__dm_part_stat_sub(&dm_disk(md)->part0,
+						   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+				part_stat_unlock();
+
 				bio_chain(b, bio);
+				trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
 				ret = generic_make_request(bio);
 				break;
 			}
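To make the comment in the hunk above concrete (hypothetical numbers, not from the patch): the full size of the original bio is accounted when its dm_io is started, and the split-off remainder is accounted a second time when generic_make_request() feeds it back through dm_make_request(). Subtracting ci.sector_count removes exactly that double-counted portion:

/*
 * Worked example, assuming a 2048-sector bio that DM splits after
 * processing 1024 sectors:
 *
 *	accounted when the dm_io started	2048 sectors
 *	b = bio_split(bio, 2048 - 1024, ...)	first 1024 sectors stay here
 *	remainder resubmitted			1024 sectors (ci.sector_count)
 *	accounted again on reentry		2048 + 1024 = 3072 sectors
 *	__dm_part_stat_sub(..., 1024)		3072 - 1024 = 2048 sectors
 *
 * so part0's sectors[] counter matches the 2048 sectors actually issued.
 */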
@@ -1713,6 +1730,15 @@ out:
 	return ret;
 }
 
+static blk_qc_t dm_process_bio(struct mapped_device *md,
+			       struct dm_table *map, struct bio *bio)
+{
+	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+		return __process_bio(md, map, bio);
+	else
+		return __split_and_process_bio(md, map, bio);
+}
+
 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
@@ -1733,10 +1759,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 		return ret;
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-		ret = __process_bio(md, map, bio);
-	else
-		ret = __split_and_process_bio(md, map, bio);
+	ret = dm_process_bio(md, map, bio);
 
 	dm_put_live_table(md, srcu_idx);
 	return ret;
@@ -2415,9 +2438,9 @@ static void dm_wq_work(struct work_struct *work)
 			break;
 
 		if (dm_request_based(md))
-			generic_make_request(c);
+			(void) generic_make_request(c);
 		else
-			__split_and_process_bio(md, map, c);
+			(void) dm_process_bio(md, map, c);
 	}
 
 	dm_put_live_table(md, srcu_idx);