Diffstat (limited to 'drivers/md/dm-rq.c')
-rw-r--r--  drivers/md/dm-rq.c  82
1 file changed, 38 insertions(+), 44 deletions(-)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 3f8577e2c13b..a41209a43506 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -7,7 +7,6 @@
#include "dm-core.h"
#include "dm-rq.h"
-#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "core-rq"
@@ -44,7 +43,6 @@ unsigned dm_get_reserved_rq_based_ios(void)
return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
-EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
@@ -70,9 +68,6 @@ void dm_start_queue(struct request_queue *q)
void dm_stop_queue(struct request_queue *q)
{
- if (blk_mq_queue_stopped(q))
- return;
-
blk_mq_quiesce_queue(q);
}
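
Note: "stopped" and "quiesced" are distinct blk-mq states, so the
blk_mq_queue_stopped() guard never described what dm_stop_queue() actually
does, and blk_mq_quiesce_queue() is safe to call on an already-quiesced
queue, making the early return redundant. For context, the paired start
path referenced in the hunk header above is unchanged by this patch and,
as I understand upstream dm-rq.c, looks like:

	void dm_start_queue(struct request_queue *q)
	{
		blk_mq_unquiesce_queue(q);
		blk_mq_kick_requeue_list(q);
	}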
@@ -146,10 +141,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md)
{
- /* nudge anyone waiting on suspend queue */
- if (unlikely(wq_has_sleeper(&md->wait)))
- wake_up(&md->wait);
-
/*
* dm_put() must be at the end of this function. See the comment above
*/
@@ -182,7 +173,7 @@ static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
void dm_mq_kick_requeue_list(struct mapped_device *md)
{
- __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+ __dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);
@@ -225,9 +216,6 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
if (req_op(clone) == REQ_OP_DISCARD &&
!clone->q->limits.max_discard_sectors)
disable_discard(tio->md);
- else if (req_op(clone) == REQ_OP_WRITE_SAME &&
- !clone->q->limits.max_write_same_sectors)
- disable_write_same(tio->md);
else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
disable_write_zeroes(tio->md);
@@ -250,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
dm_requeue_original_request(tio, true);
break;
default:
- DMWARN("unimplemented target endio return value: %d", r);
+ DMCRIT("unimplemented target endio return value: %d", r);
BUG();
}
}
@@ -288,7 +276,8 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- blk_mq_complete_request(rq);
+ if (likely(!blk_should_fake_timeout(rq->q)))
+ blk_mq_complete_request(rq);
}
/*
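
Note: this follows the pattern adopted when timeout fault injection was
moved out of blk_mq_complete_request(): the completer checks
blk_should_fake_timeout() so that, under CONFIG_FAIL_IO_TIMEOUT, a
completion can be deliberately dropped to exercise the timeout path. A
minimal sketch of the same guard in a hypothetical driver (the mydrv_*
name is illustrative only):

	static void mydrv_complete(struct request *rq)
	{
		/*
		 * Drop the completion if fault injection wants to fake a
		 * timeout; the block layer's timeout handler takes over.
		 */
		if (likely(!blk_should_fake_timeout(rq->q)))
			blk_mq_complete_request(rq);
	}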
@@ -303,26 +292,13 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
dm_complete_request(rq, error);
}
-static void end_clone_request(struct request *clone, blk_status_t error)
+static enum rq_end_io_ret end_clone_request(struct request *clone,
+ blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
dm_complete_request(tio->orig, error);
-}
-
-static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
-{
- blk_status_t r;
-
- if (blk_queue_io_stat(clone->q))
- clone->rq_flags |= RQF_IO_STAT;
-
- clone->start_time_ns = ktime_get_ns();
- r = blk_insert_cloned_request(clone->q, clone);
- if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
- /* must complete clone in terms of original request */
- dm_complete_request(rq, r);
- return r;
+ return RQ_END_IO_NONE;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
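
Note: end_clone_request() is dm's rq_end_io_fn callback on the clone; the
block layer now lets an end_io handler report whether the core should free
the request. For reference, the return type as I understand it from
include/linux/blk-mq.h (the comments are mine, not the header's):

	enum rq_end_io_ret {
		RQ_END_IO_NONE,		/* handler keeps ownership of the request */
		RQ_END_IO_FREE,		/* block layer frees the request */
	};

	typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);

dm returns RQ_END_IO_NONE because the clone is torn down explicitly via
blk_rq_unprep_clone() and the target's release_clone_rq() hook rather than
by the block core.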
@@ -344,7 +320,7 @@ static int setup_clone(struct request *clone, struct request *rq,
{
int r;
- r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
+ r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
dm_rq_bio_constructor, tio);
if (r)
return r;
@@ -403,15 +379,22 @@ static int map_request(struct dm_rq_target_io *tio)
}
/* The target has remapped the I/O so dispatch it */
- trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
+ trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- ret = dm_dispatch_clone_request(clone, rq);
- if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
+ ret = blk_insert_cloned_request(clone);
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_RESOURCE:
+ case BLK_STS_DEV_RESOURCE:
blk_rq_unprep_clone(clone);
blk_mq_cleanup_rq(clone);
tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
+ default:
+ /* must complete clone in terms of original request */
+ dm_complete_request(rq, ret);
}
break;
case DM_MAPIO_REQUEUE:
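
Note: dm_dispatch_clone_request() is folded away; the clone is handed
straight to blk_insert_cloned_request(), whose request_queue argument was
dropped upstream, and the error-to-original-request mapping moves into the
switch above. A sketch of the assumed dispatch contract:

	blk_status_t ret = blk_insert_cloned_request(clone);
	/*
	 * BLK_STS_OK                      clone queued on the lower device
	 * BLK_STS_RESOURCE/DEV_RESOURCE   undo clone setup, requeue original
	 * anything else                   fail the original with that status
	 */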
@@ -426,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio)
dm_kill_unmapped_request(rq, BLK_STS_IOERR);
break;
default:
- DMWARN("unimplemented target map return value: %d", r);
+ DMCRIT("unimplemented target map return value: %d", r);
BUG();
}
@@ -496,10 +479,23 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
struct mapped_device *md = tio->md;
struct dm_target *ti = md->immutable_target;
+ /*
+ * blk-mq's unquiesce may come from outside events, such as
+ * elevator switch, updating nr_requests or others, and request may
+ * come during suspend, so simply ask for blk-mq to requeue it.
+ */
+ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
+ return BLK_STS_RESOURCE;
+
if (unlikely(!ti)) {
int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+ struct dm_table *map;
+ map = dm_get_live_table(md, &srcu_idx);
+ if (unlikely(!map)) {
+ dm_put_live_table(md, srcu_idx);
+ return BLK_STS_RESOURCE;
+ }
ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
}
@@ -536,7 +532,6 @@ static const struct blk_mq_ops dm_mq_ops = {
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
- struct request_queue *q;
struct dm_target *immutable_tgt;
int err;
@@ -547,7 +542,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
md->tag_set->ops = &dm_mq_ops;
md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
md->tag_set->numa_node = md->numa_node_id;
- md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
md->tag_set->driver_data = md;
@@ -563,18 +558,16 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
if (err)
goto out_kfree_tag_set;
- q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
- if (IS_ERR(q)) {
- err = PTR_ERR(q);
+ err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
+ if (err)
goto out_tag_set;
- }
-
return 0;
out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
kfree(md->tag_set);
+ md->tag_set = NULL;
return err;
}
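
Note: blk_mq_init_allocated_queue() now returns an int error code instead
of a request_queue pointer, since it initializes the pre-allocated
md->queue in place; that is what removes the IS_ERR/PTR_ERR dance above.
The assumed current prototype:

	int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
					struct request_queue *q);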
@@ -584,6 +577,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
if (md->tag_set) {
blk_mq_free_tag_set(md->tag_set);
kfree(md->tag_set);
+ md->tag_set = NULL;
}
}