path: root/block/blk-mq.c
author	Jens Axboe <axboe@kernel.dk>	2018-10-29 15:06:13 -0600
committer	Jens Axboe <axboe@kernel.dk>	2018-11-07 13:44:59 -0700
commit	ea4f995ee8b8f0578b3319949f2edd5d812fdb0a (patch)
tree	f7516777fbd8b2fc16cf75b846792981c3a07434 /block/blk-mq.c
parent	blk-mq: separate number of hardware queues from nr_cpu_ids (diff)
blk-mq: cache request hardware queue mapping
We call blk_mq_map_queue() a lot, at least twice for each request per IO, sometimes more. Since we now have an indirect call in that function as well, cache the mapping so we don't have to re-call blk_mq_map_queue() for the same request multiple times.

Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
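The new mq_hctx field itself is declared outside this file (the diffstat below is limited to block/blk-mq.c), so its declaration is not part of this diff. As a rough sketch, inferred from the assignments in the hunks below rather than from the companion patch, the cached mapping amounts to one extra pointer on struct request:

	struct request {
		struct request_queue *q;	/* owning request queue */
		struct blk_mq_ctx *mq_ctx;	/* software (per-CPU) queue context */
		struct blk_mq_hw_ctx *mq_hctx;	/* cached hardware queue mapping */
		/* ... remaining fields unchanged ... */
	};

The pointer is filled in once in blk_mq_rq_ctx_init() and cleared again in __blk_mq_free_request(), so it is valid for the request's entire lifetime.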
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ccf135cf41b0..6b2859d3ad23 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -300,6 +300,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = data->q;
 	rq->mq_ctx = data->ctx;
+	rq->mq_hctx = data->hctx;
 	rq->rq_flags = rq_flags;
 	rq->cmd_flags = op;
 	if (data->flags & BLK_MQ_REQ_PREEMPT)
@@ -472,10 +473,11 @@ static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	const int sched_tag = rq->internal_tag;
 
 	blk_pm_mark_last_busy(rq);
+	rq->mq_hctx = NULL;
 	if (rq->tag != -1)
 		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
@@ -489,7 +491,7 @@ void blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->cmd_flags, ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (rq->rq_flags & RQF_ELVPRIV) {
 		if (e && e->type->ops.finish_request)
@@ -983,7 +985,7 @@ bool blk_mq_get_driver_tag(struct request *rq)
 {
 	struct blk_mq_alloc_data data = {
 		.q = rq->q,
-		.hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu),
+		.hctx = rq->mq_hctx,
 		.flags = BLK_MQ_REQ_NOWAIT,
 		.cmd_flags = rq->cmd_flags,
 	};
@@ -1149,7 +1151,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		rq = list_first_entry(list, struct request, queuelist);
 
-		hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);
+		hctx = rq->mq_hctx;
 		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
 			break;
@@ -1579,9 +1581,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  */
 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
 {
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
-						      ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
 	list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1790,9 +1790,7 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
 	blk_status_t ret;
 	int srcu_idx;
 	blk_qc_t unused_cookie;
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, rq->cmd_flags,
-						      ctx->cpu);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	hctx_lock(hctx, &srcu_idx);
 	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
@@ -1917,9 +1915,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	blk_mq_put_ctx(data.ctx);
 
 	if (same_queue_rq) {
-		data.hctx = blk_mq_map_queue(q,
-				same_queue_rq->cmd_flags,
-				same_queue_rq->mq_ctx->cpu);
+		data.hctx = same_queue_rq->mq_hctx;
 		blk_mq_try_issue_directly(data.hctx, same_queue_rq,
 				&cookie);
 	}
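
Every hunk performs the same mechanical substitution: each per-use lookup, which now includes an indirect call inside blk_mq_map_queue(), becomes a read of the pointer cached at allocation time. Schematically (a sketch of the pattern, not an additional change):

	/* before: recompute the software-to-hardware queue mapping on every use */
	hctx = blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu);

	/* after: reuse the mapping cached in blk_mq_rq_ctx_init() */
	hctx = rq->mq_hctx;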