author     Christoph Hellwig <hch@lst.de>    2020-05-16 20:27:58 +0200
committer  Jens Axboe <axboe@kernel.dk>      2020-05-19 09:34:29 -0600
commit     a5ea5811058ddb91da604afe77acd5ff29babe29
tree       f4fa697027404c6669c122f8ace5e14dd22f7646
parent     blktrace: Report pid with note messages
blk-mq: move the call to blk_queue_enter_live out of blk_mq_get_request
Move the blk_queue_enter_live calls into the callers, where they can
successively be cleaned up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
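For context on why the move is safe: the patch inverts ownership of the extra queue reference. Before, blk_mq_get_request() took its own reference with blk_queue_enter_live() and dropped it internally on failure; after, each caller takes the reference up front and must drop it on its own failure path. Below is a minimal stand-alone sketch of the two conventions, with hypothetical names and a plain counter standing in for q->q_usage_counter; it is an illustration, not kernel code.

/*
 * Stand-alone model of the two reference-counting conventions.
 * Hypothetical names; q_refs stands in for q->q_usage_counter.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int q_refs;

static void queue_enter_live(void) { q_refs++; }  /* models blk_queue_enter_live() */
static void queue_exit(void)       { q_refs--; }  /* models blk_queue_exit()       */

/* Old convention: the allocator takes the reference and drops it on failure. */
static bool get_request_old(bool ok)
{
        queue_enter_live();
        if (!ok) {
                queue_exit();   /* failure unwinding hidden inside the helper */
                return false;
        }
        return true;            /* on success the request owns the reference  */
}

/* New convention: the caller already holds the reference across the call. */
static bool get_request_new(bool ok)
{
        return ok;              /* no reference manipulation in the helper    */
}

int main(void)
{
        /* Old style: a failed allocation leaves nothing to clean up. */
        get_request_old(false);
        assert(q_refs == 0);

        /*
         * New style: the caller enters first and exits itself on failure,
         * mirroring the out_queue_exit labels added in the diff below.
         */
        queue_enter_live();
        if (!get_request_new(false))
                queue_exit();
        assert(q_refs == 0);

        puts("reference counts balance under both conventions");
        return 0;
}

Either way exactly the references taken are dropped; the change only makes the failure unwinding explicit at each call site.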
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4f8adef7fd0d..b7e06673a30b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -342,8 +342,6 @@ static struct request *blk_mq_get_request(struct request_queue *q,
         bool clear_ctx_on_error = false;
         u64 alloc_time_ns = 0;

-        blk_queue_enter_live(q);
-
         /* alloc_time includes depth and tag waits */
         if (blk_queue_rq_alloc_time(q))
                 alloc_time_ns = ktime_get_ns();
@@ -379,7 +377,6 @@ static struct request *blk_mq_get_request(struct request_queue *q,
         if (tag == BLK_MQ_TAG_FAIL) {
                 if (clear_ctx_on_error)
                         data->ctx = NULL;
-                blk_queue_exit(q);
                 return NULL;
         }
@@ -409,16 +406,19 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
         if (ret)
                 return ERR_PTR(ret);

+        blk_queue_enter_live(q);
         rq = blk_mq_get_request(q, NULL, &alloc_data);
         blk_queue_exit(q);
         if (!rq)
-                return ERR_PTR(-EWOULDBLOCK);
-
+                goto out_queue_exit;
         rq->__data_len = 0;
         rq->__sector = (sector_t) -1;
         rq->bio = rq->biotail = NULL;
         return rq;
+out_queue_exit:
+        blk_queue_exit(q);
+        return ERR_PTR(-EWOULDBLOCK);
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -450,21 +450,24 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
          * Check if the hardware context is actually mapped to anything.
          * If not tell the caller that it should skip this queue.
          */
+        ret = -EXDEV;
         alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
-        if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
-                blk_queue_exit(q);
-                return ERR_PTR(-EXDEV);
-        }
+        if (!blk_mq_hw_queue_mapped(alloc_data.hctx))
+                goto out_queue_exit;
         cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
         alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

+        ret = -EWOULDBLOCK;
+        blk_queue_enter_live(q);
         rq = blk_mq_get_request(q, NULL, &alloc_data);
         blk_queue_exit(q);
         if (!rq)
-                return ERR_PTR(-EWOULDBLOCK);
-
+                goto out_queue_exit;
         return rq;
+out_queue_exit:
+        blk_queue_exit(q);
+        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
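The blk_mq_alloc_request_hctx() hunk above also relies on a common kernel idiom: stage the errno in ret before each step that can fail, so one unwind label can return ERR_PTR(ret) for every failure mode. A minimal stand-alone sketch of that idiom, using hypothetical names and plain ints in place of ERR_PTR():

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical two-step setup; each step stages its errno before running,
 * so the single out_unwind label covers both failure modes.
 */
static int do_setup(int fail_step)
{
        int ret;

        ret = -EXDEV;             /* errno should step 1 fail */
        if (fail_step == 1)
                goto out_unwind;

        ret = -EWOULDBLOCK;       /* errno should step 2 fail */
        if (fail_step == 2)
                goto out_unwind;

        return 0;

out_unwind:
        /* one cleanup point, like blk_queue_exit() in the hunk above */
        return ret;
}

int main(void)
{
        printf("step 1 fails -> %d (-EXDEV)\n", do_setup(1));
        printf("step 2 fails -> %d (-EWOULDBLOCK)\n", do_setup(2));
        printf("success      -> %d\n", do_setup(0));
        return 0;
}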
@@ -2043,8 +2046,10 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
         rq_qos_throttle(q, bio);

         data.cmd_flags = bio->bi_opf;
+        blk_queue_enter_live(q);
         rq = blk_mq_get_request(q, bio, &data);
         if (unlikely(!rq)) {
+                blk_queue_exit(q);
                 rq_qos_cleanup(q, bio);
                 if (bio->bi_opf & REQ_NOWAIT)
                         bio_wouldblock_error(bio);