Diffstat (limited to 'block/blk-mq.c')
 block/blk-mq.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0191fc0447f7..696450257ac1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -105,7 +105,7 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
 {
 	struct mq_inflight *mi = priv;
 
-	if (rq->part == mi->part)
+	if (rq->part == mi->part && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
 		mi->inflight[rq_data_dir(rq)]++;
 
 	return true;
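
This hunk narrows the per-partition in-flight accounting. The callback is
driven by blk_mq_queue_tag_busy_iter() from blk_mq_in_flight(), and the
busy-tag iterator also visits requests that hold a tag but are idle or
already complete, so a state filter is needed to count only requests that
are truly in flight. For reference, the accessor used here is the one-liner
defined in block/blk-mq.h in this era of the tree:

static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}
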
@@ -1413,6 +1413,11 @@ out:
 	hctx->dispatched[queued_to_index(queued)]++;
 
+	/* If we didn't flush the entire list, we could have told the driver
+	 * there was more coming, but that turned out to be a lie.
+	 */
+	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
+		q->mq_ops->commit_rqs(hctx);
 	/*
 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
 	 * that is where we will continue on next queue run.
 	 */
@@ -1426,14 +1431,6 @@ out:
 
 		blk_mq_release_budgets(q, nr_budgets);
 
-		/*
-		 * If we didn't flush the entire list, we could have told
-		 * the driver there was more coming, but that turned out to
-		 * be a lie.
-		 */
-		if (q->mq_ops->commit_rqs && queued)
-			q->mq_ops->commit_rqs(hctx);
-
 		spin_lock(&hctx->lock);
 		list_splice_tail_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
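
Context for the two hunks above: ->commit_rqs() exists for drivers that
defer their doorbell write until a queue_rq() call arrives with bd->last
set. If the dispatch loop stops early, the driver was effectively lied to
and must be kicked explicitly, and the new "|| errors" term extends that to
batches that drained entirely through blk_mq_end_request(). A minimal
sketch of the driver-side pairing this protects, with all foo_* names
hypothetical:

static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct foo_queue *fq = hctx->driver_data;

	/* Write the submission entry, but only ring the (expensive)
	 * doorbell when the core says this is the last request of the
	 * batch.
	 */
	foo_write_sqe(fq, blk_mq_rq_to_pdu(bd->rq));
	if (bd->last)
		foo_ring_doorbell(fq);
	return BLK_STS_OK;
}

static void foo_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	/* Backstop: the core ended a batch earlier than advertised. */
	foo_ring_doorbell(hctx->driver_data);
}

Moving the call ahead of the requeue handling also rings the doorbell
before the leftover requests are spliced back onto hctx->dispatch.
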
@@ -1807,7 +1804,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 	/*
 	 * If we are stopped, don't run the queue.
 	 */
-	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+	if (blk_mq_hctx_stopped(hctx))
 		return;
 
 	__blk_mq_run_hw_queue(hctx);
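
No behavior change here: blk_mq_hctx_stopped() is the existing helper in
block/blk-mq.h that wraps exactly this test_bit():

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
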
@@ -1940,13 +1937,18 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 		unsigned int nr_segs)
 {
+	int err;
+
 	if (bio->bi_opf & REQ_RAHEAD)
 		rq->cmd_flags |= REQ_FAILFAST_MASK;
 
 	rq->__sector = bio->bi_iter.bi_sector;
 	rq->write_hint = bio->bi_write_hint;
 	blk_rq_bio_prep(rq, bio, nr_segs);
-	blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
+
+	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
+	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
+	WARN_ON_ONCE(err);
 
 	blk_account_io_start(rq);
 }
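
The WARN_ON_ONCE() documents an invariant rather than handling a plausible
failure: blk_crypto_rq_bio_prep() returns nonzero only when allocating the
request's copy of the bio's crypt context fails, and a small allocation
that may direct-reclaim (GFP_NOIO includes __GFP_DIRECT_RECLAIM) does not
fail in practice. A paraphrase of the callee's only failure path, using the
names from blk-crypto of this era (the exact body may differ):

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		/* With __GFP_DIRECT_RECLAIM set, mempool_alloc() waits for
		 * a free element instead of returning NULL, so the -ENOMEM
		 * branch is unreachable for GFP_NOIO callers.
		 */
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}
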
@@ -2080,6 +2082,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
 	int queued = 0;
+	int errors = 0;
 
 	while (!list_empty(list)) {
 		blk_status_t ret;
@@ -2096,6 +2099,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 				break;
 			}
 			blk_mq_end_request(rq, ret);
+			errors++;
 		} else
 			queued++;
 	}
@@ -2105,7 +2109,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 	 * If we didn't flush the entire list, we could have told
 	 * the driver there was more coming, but that turned out to
 	 * be a lie.
 	 */
-	if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs && queued)
+	if ((!list_empty(list) || errors) &&
+			hctx->queue->mq_ops->commit_rqs && queued)
 		hctx->queue->mq_ops->commit_rqs(hctx);
 }