From 5bbf4e5a8e3a780874b2ed77bd1bd57850f3f6da Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 16 Jun 2017 18:15:26 +0200
Subject: blk-mq-sched: unify request prepare methods

This patch makes sure we always allocate requests in the core blk-mq
code and use a common prepare_request method to initialize them for
both mq I/O schedulers.  For Kyber an additional limit_depth method
is added that is called before allocating the request.

Also, because none of the initializations can really fail, the new method
does not return an error - instead the bfq finish method is hardened
to deal with the no-IOC case.

Last but not least this removes the abuse of RQF_QUEUED by the blk-mq
scheduling code, as RQF_ELVPRIV is all that is needed now.

Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)

(limited to 'block/blk-mq.c')

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2f380ab7a603..81d05c19d4b3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -298,16 +298,11 @@ static struct request *blk_mq_get_request(struct request_queue *q,
                 * Flush requests are special and go directly to the
                 * dispatch list.
                 */
-               if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-                       rq = e->type->ops.mq.get_request(q, op, data);
-                       if (rq)
-                               rq->rq_flags |= RQF_QUEUED;
-                       goto allocated;
-               }
+               if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
+                       e->type->ops.mq.limit_depth(op, data);
        }
 
        rq = __blk_mq_alloc_request(data, op);
-allocated:
        if (!rq) {
                blk_queue_exit(q);
                return NULL;
@@ -315,17 +310,12 @@ allocated:
 
        if (!op_is_flush(op)) {
                rq->elv.icq = NULL;
-               if (e && e->type->ops.mq.get_rq_priv) {
+               if (e && e->type->ops.mq.prepare_request) {
                        if (e->type->icq_cache && rq_ioc(bio))
                                blk_mq_sched_assign_ioc(rq, bio);
 
-                       if (e->type->ops.mq.get_rq_priv(q, rq, bio)) {
-                               if (rq->elv.icq)
-                                       put_io_context(rq->elv.icq->ioc);
-                               rq->elv.icq = NULL;
-                       } else {
-                               rq->rq_flags |= RQF_ELVPRIV;
-                       }
+                       e->type->ops.mq.prepare_request(rq, bio);
+                       rq->rq_flags |= RQF_ELVPRIV;
                }
        }
        data->hctx->queued++;
@@ -413,7 +403,7 @@ void blk_mq_free_request(struct request *rq)
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        const int sched_tag = rq->internal_tag;
 
-       if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
+       if (rq->rq_flags & RQF_ELVPRIV) {
        	if (e && e->type->ops.mq.finish_request)
                        e->type->ops.mq.finish_request(rq);
                if (rq->elv.icq) {
--
cgit v1.2.3-59-g8ed1b
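
For readers following the interface change, here is a minimal sketch of how an mq I/O
scheduler might hook the unified callbacks after this patch. It is not part of the patch:
the example_* names, the async depth value, and the use of rq->elv.priv[] are invented
for illustration. Only the limit_depth(op, data), prepare_request(rq, bio) and
finish_request(rq) signatures are taken from the call sites in the diff above; the
data->shallow_depth field is assumed from the way Kyber limits async depth in this
series, and the elevator_type registration fields follow the in-tree schedulers of
this kernel generation.

/*
 * Illustrative sketch only (not part of this patch).  Such code would
 * live in block/ next to the in-tree schedulers and use the private
 * block/blk-mq.h header for struct blk_mq_alloc_data.
 */

/* Called before the request/tag is allocated; it may only limit depth. */
static void example_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
        /*
         * Hypothetical policy: throttle async requests.  data->shallow_depth
         * is assumed from how Kyber limits async depth in this series.
         */
        if (!op_is_sync(op))
                data->shallow_depth = 16;
}

/* Called after the core code has allocated rq; it cannot fail by design. */
static void example_prepare_request(struct request *rq, struct bio *bio)
{
        /* Attach per-request scheduler state; the core then sets RQF_ELVPRIV. */
        rq->elv.priv[0] = NULL;
        rq->elv.priv[1] = NULL;
}

/* Paired with prepare_request via RQF_ELVPRIV in blk_mq_free_request(). */
static void example_finish_request(struct request *rq)
{
        /* Must tolerate a request that never got an io_context (the no-IOC case). */
}

static struct elevator_type example_sched = {
        .ops.mq = {
                .limit_depth            = example_limit_depth,
                .prepare_request        = example_prepare_request,
                .finish_request         = example_finish_request,
                /* insert/dispatch/completion hooks omitted */
        },
        .uses_mq        = true,
        .elevator_name  = "example",
        .elevator_owner = THIS_MODULE,
};

Compared with the old get_request/get_rq_priv pair, the scheduler no longer allocates
the request or reports failure: the core allocates it, calls prepare_request
unconditionally, and relies on RQF_ELVPRIV alone to decide whether finish_request
must run.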