From bebe84ebeec4d030aa65af58376305749762e5a0 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:39 +0200
Subject: blk-mq: remove blk-mq-tag.h

blk-mq-tag.h is always included by blk-mq.h, and causes recursive
inclusion hell with further changes.  Just merge it into blk-mq.h
instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq.h | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 60 insertions(+), 1 deletion(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-mq.h b/block/blk-mq.h
index ef59fee62780..7a041fecea02 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -3,7 +3,6 @@
 #define INT_BLK_MQ_H
 
 #include "blk-stat.h"
-#include "blk-mq-tag.h"
 
 struct blk_mq_tag_set;
 
@@ -30,6 +29,12 @@ struct blk_mq_ctx {
 	struct kobject		kobj;
 } ____cacheline_aligned_in_smp;
 
+enum {
+	BLK_MQ_NO_TAG		= -1U,
+	BLK_MQ_TAG_MIN		= 1,
+	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
+};
+
 void blk_mq_submit_bio(struct bio *bio);
 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
 		struct io_comp_batch *iob, unsigned int flags);
@@ -164,6 +169,60 @@ struct blk_mq_alloc_data {
 	struct blk_mq_hw_ctx *hctx;
 };
 
+struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
+		unsigned int reserved_tags, int node, int alloc_policy);
+void blk_mq_free_tags(struct blk_mq_tags *tags);
+int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
+		struct sbitmap_queue *breserved_tags, unsigned int queue_depth,
+		unsigned int reserved, int node, int alloc_policy);
+
+unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
+unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
+		unsigned int *offset);
+void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+		unsigned int tag);
+void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
+int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_tags **tags, unsigned int depth, bool can_grow);
+void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
+		unsigned int size);
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
+
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
+		void *priv);
+void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
+		void *priv);
+
+static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
+						 struct blk_mq_hw_ctx *hctx)
+{
+	if (!hctx)
+		return &bt->ws[0];
+	return sbq_wait_ptr(bt, &hctx->wait_index);
+}
+
+void __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
+void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
+
+static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+{
+	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+		__blk_mq_tag_busy(hctx);
+}
+
+static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+{
+	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
+		__blk_mq_tag_idle(hctx);
+}
+
+static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
+					  unsigned int tag)
+{
+	return tag < tags->nr_reserved_tags;
+}
+
 static inline bool blk_mq_is_shared_tags(unsigned int flags)
 {
 	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
-- cgit v1.2.3-59-g8ed1b
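A side note on the enum that just moved: its values lean on unsigned wraparound, so the all-ones BLK_MQ_NO_TAG doubles as the "no tag allocated" sentinel and the largest real tag sits one below it. A tiny standalone C sketch (user-space, values copied from the hunk above; illustrative only, not part of the patch):

	#include <stdio.h>

	/* Values copied from the enum merged into block/blk-mq.h above. */
	enum {
		BLK_MQ_NO_TAG	= -1U,			/* 0xffffffff: sentinel */
		BLK_MQ_TAG_MIN	= 1,
		BLK_MQ_TAG_MAX	= BLK_MQ_NO_TAG - 1,	/* 0xfffffffe */
	};

	int main(void)
	{
		unsigned int tag = BLK_MQ_NO_TAG;	/* "allocation failed" */

		/* A driver detects allocation failure by comparing to the sentinel. */
		if (tag == BLK_MQ_NO_TAG)
			printf("no tag (0x%x), max valid tag 0x%x\n",
			       tag, (unsigned int)BLK_MQ_TAG_MAX);
		return 0;
	}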
From 90110e04f265b95f59fbae09c228c5920b8a302f Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:40 +0200
Subject: blk-mq: include <linux/blk-mq.h> in block/blk-mq.h

block/blk-mq.h needs various definitions from <linux/blk-mq.h>,
include it there instead of relying on the source files to include
both.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-flush.c      | 1 -
 block/blk-mq-cpumap.c  | 1 -
 block/blk-mq-debugfs.c | 1 -
 block/blk-mq-pci.c     | 1 -
 block/blk-mq-sched.c   | 1 -
 block/blk-mq-sysfs.c   | 1 -
 block/blk-mq-tag.c     | 1 -
 block/blk-mq-virtio.c  | 1 -
 block/blk-mq.c         | 1 -
 block/blk-mq.h         | 1 +
 block/blk-pm.c         | 1 -
 block/blk-stat.c       | 1 -
 block/blk-sysfs.c      | 1 -
 block/kyber-iosched.c  | 1 -
 block/mq-deadline.c    | 1 -
 15 files changed, 1 insertion(+), 14 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-flush.c b/block/blk-flush.c
index a13a1d6caa0f..3c81b0af5b39 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -68,7 +68,6 @@
 #include
 #include
 #include
-#include <linux/blk-mq.h>
 #include
 
 #include "blk.h"
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 0c612c19feb8..9638b25fd521 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -12,7 +12,6 @@
 #include
 #include
-#include <linux/blk-mq.h>
 
 #include "blk.h"
 #include "blk-mq.h"
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index ace2bcf1cf9a..d23a8554ec4a 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -7,7 +7,6 @@
 #include
 #include
-#include <linux/blk-mq.h>
 
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index a90b88fd1332..d47b5c73c9eb 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -4,7 +4,6 @@
  */
 #include
 #include
-#include <linux/blk-mq.h>
 #include
 #include
 #include
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 1029e8eed5ee..c4b2d44b2d4e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -6,7 +6,6 @@
  */
 #include
 #include
-#include <linux/blk-mq.h>
 #include
 #include
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba84caa868dd..156e9bb07abf 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -10,7 +10,6 @@
 #include
 #include
-#include <linux/blk-mq.h>
 
 #include "blk.h"
 #include "blk-mq.h"
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 1f8b065d72c5..d6af9d431dc6 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -9,7 +9,6 @@
 #include
 #include
-#include <linux/blk-mq.h>
 #include
 
 #include "blk.h"
 #include "blk-mq.h"
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 6589f076a096..68d0945c0b08 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -3,7 +3,6 @@
  * Copyright (c) 2016 Christoph Hellwig.
  */
 #include
-#include <linux/blk-mq.h>
 #include
 #include
 #include
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 545600be2063..29014a0f9f39 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -32,7 +32,6 @@
 #include
 
-#include <linux/blk-mq.h>
 #include
 #include "blk.h"
 #include "blk-mq.h"
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 7a041fecea02..fa13b694ff27 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -2,6 +2,7 @@
 #ifndef INT_BLK_MQ_H
 #define INT_BLK_MQ_H
 
+#include <linux/blk-mq.h>
 #include "blk-stat.h"
 
 struct blk_mq_tag_set;
diff --git a/block/blk-pm.c b/block/blk-pm.c
index 8af5ee54feb4..6b72b2e03fc8 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 
-#include <linux/blk-mq.h>
 #include
 #include
 #include
diff --git a/block/blk-stat.c b/block/blk-stat.c
index bc7e0ed81642..7ff76ae6c76a 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -6,7 +6,6 @@
  */
 #include
 #include
-#include <linux/blk-mq.h>
 
 #include "blk-stat.h"
 #include "blk-mq.h"
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 1a743b4f2958..a64208583853 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,7 +9,6 @@
 #include
 #include
 #include
-#include <linux/blk-mq.h>
 #include
 
 #include "blk.h"
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index d0a4838ce7fc..3f9fb2090c91 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -8,7 +8,6 @@
 #include
 #include
-#include <linux/blk-mq.h>
 #include
 #include
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index a18526e11194..af9e79050dcc 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -8,7 +8,6 @@
 #include
 #include
 #include
-#include <linux/blk-mq.h>
 #include
 #include
 #include
-- cgit v1.2.3-59-g8ed1b
From 94aa228c2a2f6edc8e9b7c4745942ea4c5978977 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:41 +0200
Subject: blk-mq: move more logic into blk_mq_insert_requests

Move all logic related to the direct insert (including the call to
blk_mq_run_hw_queue) into blk_mq_insert_requests to streamline the code
flow a bit, and to allow marking blk_mq_try_issue_list_directly static.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq-sched.c | 17 ++---------------
 block/blk-mq.c       | 20 ++++++++++++++++++--
 block/blk-mq.h       |  4 +---
 3 files changed, 21 insertions(+), 20 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c4b2d44b2d4e..811a9765b745 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -472,23 +472,10 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 	e = hctx->queue->elevator;
 	if (e) {
 		e->type->ops.insert_requests(hctx, list, false);
+		blk_mq_run_hw_queue(hctx, run_queue_async);
 	} else {
-		/*
-		 * try to issue requests directly if the hw queue isn't
-		 * busy in case of 'none' scheduler, and this way may save
-		 * us one extra enqueue & dequeue to sw queue.
-		 */
-		if (!hctx->dispatch_busy && !run_queue_async) {
-			blk_mq_run_dispatch_ops(hctx->queue,
-				blk_mq_try_issue_list_directly(hctx, list));
-			if (list_empty(list))
-				goto out;
-		}
-		blk_mq_insert_requests(hctx, ctx, list);
+		blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
 	}
-
-	blk_mq_run_hw_queue(hctx, run_queue_async);
- out:
 	percpu_ref_put(&q->q_usage_counter);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 29014a0f9f39..536f001282bb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -44,6 +44,9 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
+static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+		struct list_head *list);
+
 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 		blk_qc_t qc)
 {
@@ -2495,12 +2498,23 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 }
 
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list)
+			    struct list_head *list, bool run_queue_async)
 {
 	struct request *rq;
 	enum hctx_type type = hctx->type;
 
+	/*
+	 * Try to issue requests directly if the hw queue isn't busy to save an
+	 * extra enqueue & dequeue to the sw queue.
+	 */
+	if (!hctx->dispatch_busy && !run_queue_async) {
+		blk_mq_run_dispatch_ops(hctx->queue,
+			blk_mq_try_issue_list_directly(hctx, list));
+		if (list_empty(list))
+			goto out;
+	}
+
 	/*
 	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
 	 * offline now
@@ -2514,6 +2528,8 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	list_splice_tail_init(list, &ctx->rq_lists[type]);
 	blk_mq_hctx_mark_pending(hctx, ctx);
 	spin_unlock(&ctx->lock);
+out:
+	blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
@@ -2755,7 +2771,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	} while (!rq_list_empty(plug->mq_list));
 }
 
-void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
+static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
 	int queued = 0;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index fa13b694ff27..5d551f9ef2d6 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -70,9 +70,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list);
-void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
-		struct list_head *list);
+			    struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings
-- cgit v1.2.3-59-g8ed1b
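The visible contract change in this patch: blk_mq_insert_requests() now owns both the direct-issue attempt and the final queue run, so its caller no longer pairs the insert with blk_mq_run_hw_queue(). A condensed before/after of the non-elevator branch, taken from the hunks above (illustrative, not extra kernel code):

	/* before: the caller tried direct issue, inserted, then ran the queue */
	blk_mq_insert_requests(hctx, ctx, list);
	blk_mq_run_hw_queue(hctx, run_queue_async);

	/* after: one call covers direct issue, sw-queue insert, and the run */
	blk_mq_insert_requests(hctx, ctx, list, run_queue_async);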
From 05a93117703e7b2e40fa9193e622079b30395bcc Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:42 +0200
Subject: blk-mq: fold blk_mq_sched_insert_requests into blk_mq_dispatch_plug_list

blk_mq_dispatch_plug_list is the only caller of
blk_mq_sched_insert_requests, and it makes sense to just fold it there
as blk_mq_sched_insert_requests isn't specific to I/O schedulers despite
the name.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq-sched.c | 24 ------------------------
 block/blk-mq-sched.h |  3 ---
 block/blk-mq.c       | 17 +++++++++++++----
 block/blk-mq.h       |  2 --
 block/mq-deadline.c  |  2 +-
 5 files changed, 14 insertions(+), 34 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 811a9765b745..9c0d231722d9 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -455,30 +455,6 @@ run:
 	blk_mq_run_hw_queue(hctx, async);
 }
 
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct blk_mq_ctx *ctx,
-				  struct list_head *list, bool run_queue_async)
-{
-	struct elevator_queue *e;
-	struct request_queue *q = hctx->queue;
-
-	/*
-	 * blk_mq_sched_insert_requests() is called from flush plug
-	 * context only, and hold one usage counter to prevent queue
-	 * from being released.
-	 */
-	percpu_ref_get(&q->q_usage_counter);
-
-	e = hctx->queue->elevator;
-	if (e) {
-		e->type->ops.insert_requests(hctx, list, false);
-		blk_mq_run_hw_queue(hctx, run_queue_async);
-	} else {
-		blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
-	}
-	percpu_ref_put(&q->q_usage_counter);
-}
-
 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
 					  struct blk_mq_hw_ctx *hctx,
 					  unsigned int hctx_idx)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 65cab6e475be..1ec01e9934dc 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,9 +18,6 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 				 bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct blk_mq_ctx *ctx,
-				  struct list_head *list, bool run_queue_async);
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 536f001282bb..f1da4f053cc6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2497,9 +2497,9 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 	blk_mq_run_hw_queue(hctx, false);
 }
 
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list, bool run_queue_async)
-
+static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_ctx *ctx, struct list_head *list,
+		bool run_queue_async)
 {
 	struct request *rq;
 	enum hctx_type type = hctx->type;
@@ -2725,7 +2725,16 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 
 	plug->mq_list = requeue_list;
 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
-	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+
+	percpu_ref_get(&this_hctx->queue->q_usage_counter);
+	if (this_hctx->queue->elevator) {
+		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
+				&list, false);
+		blk_mq_run_hw_queue(this_hctx, from_sched);
+	} else {
+		blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
+	}
+	percpu_ref_put(&this_hctx->queue->q_usage_counter);
 }
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5d551f9ef2d6..bd7ae5e67a52 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -69,8 +69,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 			     bool at_head);
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue);
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index af9e79050dcc..d62a3039c8e0 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -820,7 +820,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 }
 
 /*
- * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ * Called from blk_mq_sched_insert_request() or blk_mq_dispatch_plug_list().
 */
 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
-- cgit v1.2.3-59-g8ed1b
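One detail worth noting in the fold above: the q_usage_counter guard survives the move, so the plug dispatch path still pins the queue across the insertion exactly as the deleted helper did. The shape of the pattern, distilled from the blk_mq_dispatch_plug_list() hunk (a sketch, not additional code):

	/* Keep the queue from being released while requests are inserted. */
	percpu_ref_get(&q->q_usage_counter);
	if (q->elevator)
		q->elevator->type->ops.insert_requests(hctx, &list, false);
	else
		blk_mq_insert_requests(hctx, ctx, &list, from_sched);
	percpu_ref_put(&q->q_usage_counter);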
From a88db1e0003eda8adbe3c499b81f736d8065b952 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:44 +0200
Subject: blk-mq: fold __blk_mq_insert_request into blk_mq_insert_request

There is no good reason to keep __blk_mq_insert_request around for two
function calls and a single caller.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq.c | 14 ++------------
 block/blk-mq.h |  2 --
 2 files changed, 2 insertions(+), 14 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 78e54a64fe92..103caf1bae27 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2463,17 +2463,6 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
 		list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
 }
 
-void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-			     bool at_head)
-{
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-
-	lockdep_assert_held(&ctx->lock);
-
-	__blk_mq_insert_req_list(hctx, rq, at_head);
-	blk_mq_hctx_mark_pending(hctx, ctx);
-}
-
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
@@ -2598,7 +2587,8 @@ static void blk_mq_insert_request(struct request *rq, bool at_head,
 		e->type->ops.insert_requests(hctx, &list, at_head);
 	} else {
 		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
+		__blk_mq_insert_req_list(hctx, rq, at_head);
+		blk_mq_hctx_mark_pending(hctx, ctx);
 		spin_unlock(&ctx->lock);
 	}
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index bd7ae5e67a52..e2d59e33046e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -65,8 +65,6 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 /*
  * Internal helpers for request insertion into sw queues
 */
-void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-			     bool at_head);
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue);
 
-- cgit v1.2.3-59-g8ed1b
From 2394395cd598f6404c57ae0b63afb5d37e94924d Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:52 +0200
Subject: blk-mq: don't run the hw_queue from blk_mq_request_bypass_insert

blk_mq_request_bypass_insert takes a bool parameter that controls
whether to run the queue at the end of the function.  Move the
blk_mq_run_hw_queue call to the callers that want it instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-16-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-flush.c |  4 +++-
 block/blk-mq.c    | 24 +++++++++++-------------
 block/blk-mq.h    |  3 +--
 3 files changed, 15 insertions(+), 16 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 62ef98f604fb..3561aba8cc23 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -389,6 +389,7 @@ void blk_insert_flush(struct request *rq)
 	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	/*
 	 * @policy now records what operations need to be done.  Adjust
@@ -425,7 +426,8 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false, true);
+		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d1941db1ad3c..cde7ba9c39bf 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1442,7 +1442,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		if (rq->rq_flags & RQF_DONTPREP) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_request_bypass_insert(rq, false, false);
+			blk_mq_request_bypass_insert(rq, false);
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
@@ -2457,13 +2457,11 @@ static void blk_mq_run_work_fn(struct work_struct *work)
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
  * @at_head: true if the request should be inserted at the head of the list.
- * @run_queue: If we should run the hardware queue after inserting the request.
 *
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
-				  bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
@@ -2473,9 +2471,6 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 	else
 		list_add_tail(&rq->queuelist, &hctx->dispatch);
 	spin_unlock(&hctx->lock);
-
-	if (run_queue)
-		blk_mq_run_hw_queue(hctx, false);
 }
 
 static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
@@ -2530,7 +2525,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
 	 * and it is added to the scheduler queue, there is no chance to
 	 * dispatch it given we prioritize requests in hctx->dispatch.
 	 */
-		blk_mq_request_bypass_insert(rq, at_head, false);
+		blk_mq_request_bypass_insert(rq, at_head);
 	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
@@ -2553,7 +2548,7 @@ static void blk_mq_insert_request(struct request *rq, bool at_head)
 	 * Simply queue flush rq to the front of hctx->dispatch so that
 	 * intensive flush workloads can benefit in case of NCQ HW.
 	 */
-		blk_mq_request_bypass_insert(rq, true, false);
+		blk_mq_request_bypass_insert(rq, true);
 	} else if (q->elevator) {
 		LIST_HEAD(list);
 
@@ -2673,7 +2668,8 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		break;
 	case BLK_STS_RESOURCE:
 	case BLK_STS_DEV_RESOURCE:
-		blk_mq_request_bypass_insert(rq, false, true);
+		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_run_hw_queue(hctx, false);
 		break;
 	default:
 		blk_mq_end_request(rq, ret);
@@ -2720,7 +2716,8 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false, true);
+			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_run_hw_queue(hctx, false);
 			goto out;
 		default:
 			blk_mq_end_request(rq, ret);
@@ -2838,8 +2835,9 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false,
-						     list_empty(list));
+			blk_mq_request_bypass_insert(rq, false);
+			if (list_empty(list))
+				blk_mq_run_hw_queue(hctx, false);
 			goto out;
 		default:
 			blk_mq_end_request(rq, ret);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e2d59e33046e..f30f99166f38 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -65,8 +65,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 /*
  * Internal helpers for request insertion into sw queues
 */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
-				  bool run_queue);
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head);
 
-- cgit v1.2.3-59-g8ed1b
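The conversion rule in this patch (and in the requeue-list analogue that follows) is mechanical: a call site that passed run_queue=true grows an explicit blk_mq_run_hw_queue() after the insert, and one that passed false simply drops the argument. Distilled from the hunks above:

	/* before: the helper ran the queue as a side effect */
	blk_mq_request_bypass_insert(rq, false, true);

	/* after: the caller decides, visibly, whether the queue runs */
	blk_mq_request_bypass_insert(rq, false);
	blk_mq_run_hw_queue(hctx, false);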
From 214a441805b8cc090930fb00193125e22466a95a Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:53 +0200
Subject: blk-mq: don't kick the requeue_list in blk_mq_add_to_requeue_list

blk_mq_add_to_requeue_list takes a bool parameter that controls whether
to kick the requeue list at the end of the function.  Move the call to
blk_mq_kick_requeue_list to the callers that want it instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-17-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-flush.c |  6 ++++--
 block/blk-mq.c    | 13 +++++++------
 block/blk-mq.h    |  3 +--
 3 files changed, 12 insertions(+), 10 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3561aba8cc23..015982bd2f7c 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -188,7 +188,8 @@ static void blk_flush_complete_seq(struct request *rq,
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
-		blk_mq_add_to_requeue_list(rq, true, true);
+		blk_mq_add_to_requeue_list(rq, true);
+		blk_mq_kick_requeue_list(q);
 		break;
 
 	case REQ_FSEQ_DONE:
@@ -345,7 +346,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	smp_wmb();
 	req_ref_set(flush_rq, 1);
-	blk_mq_add_to_requeue_list(flush_rq, false, true);
+	blk_mq_add_to_requeue_list(flush_rq, false);
+	blk_mq_kick_requeue_list(q);
 }
 
 static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cde7ba9c39bf..db806c1a194c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1412,12 +1412,17 @@ static void __blk_mq_requeue_request(struct request *rq)
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
+	struct request_queue *q = rq->q;
+
 	__blk_mq_requeue_request(rq);
 
 	/* this request will be re-inserted to io scheduler queue */
 	blk_mq_sched_requeue_request(rq);
 
-	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
+	blk_mq_add_to_requeue_list(rq, true);
+
+	if (kick_requeue_list)
+		blk_mq_kick_requeue_list(q);
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
 
@@ -1459,8 +1464,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 	blk_mq_run_hw_queues(q, false);
 }
 
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-				bool kick_requeue_list)
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
@@ -1479,9 +1483,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 		list_add_tail(&rq->queuelist, &q->requeue_list);
 	}
 	spin_unlock_irqrestore(&q->requeue_lock, flags);
-
-	if (kick_requeue_list)
-		blk_mq_kick_requeue_list(q);
 }
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index f30f99166f38..5d3761c50063 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -44,8 +44,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
 			     unsigned int);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-				bool kick_requeue_list);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
-- cgit v1.2.3-59-g8ed1b
From 710fa3789ed94ceee9675f8e189aaf3e7525269a Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:54 +0200
Subject: blk-mq: pass a flags argument to blk_mq_insert_request

Replace the at_head bool with a flags argument that so far only
contains a single BLK_MQ_INSERT_AT_HEAD value.  This makes it much
easier to grep for head insertions into the blk-mq dispatch queues.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-18-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq.c | 27 ++++++++++++++-------------
 block/blk-mq.h |  3 +++
 2 files changed, 17 insertions(+), 13 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-mq.c b/block/blk-mq.c
index db806c1a194c..ba64c4621e29 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -44,7 +44,7 @@
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
-static void blk_mq_insert_request(struct request *rq, bool at_head);
+static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);
 
@@ -1308,7 +1308,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
 		return;
 	}
 
-	blk_mq_insert_request(rq, at_head);
+	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
 	blk_mq_run_hw_queue(hctx, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
@@ -1371,7 +1371,7 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	rq->end_io = blk_end_sync_rq;
 
 	blk_account_io_start(rq);
-	blk_mq_insert_request(rq, at_head);
+	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
 	blk_mq_run_hw_queue(hctx, false);
 
 	if (blk_rq_is_poll(rq)) {
@@ -1451,14 +1451,14 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_insert_request(rq, true);
+			blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD);
 		}
 	}
 
 	while (!list_empty(&rq_list)) {
 		rq = list_entry(rq_list.next, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 	}
 
 	blk_mq_run_hw_queues(q, false);
@@ -2509,7 +2509,7 @@ out:
 	blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
-static void blk_mq_insert_request(struct request *rq, bool at_head)
+static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		 * and it is added to the scheduler queue, there is no chance to
 		 * dispatch it given we prioritize requests in hctx->dispatch.
 		 */
-		blk_mq_request_bypass_insert(rq, at_head);
+		blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
 	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
@@ -2556,12 +2556,13 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
 
 		list_add(&rq->queuelist, &list);
-		q->elevator->type->ops.insert_requests(hctx, &list, at_head);
+		q->elevator->type->ops.insert_requests(hctx, &list,
+				flags & BLK_MQ_INSERT_AT_HEAD);
 	} else {
 		trace_block_rq_insert(rq);
 
 		spin_lock(&ctx->lock);
-		if (at_head)
+		if (flags & BLK_MQ_INSERT_AT_HEAD)
 			list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]);
 		else
 			list_add_tail(&rq->queuelist,
@@ -2653,12 +2654,12 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 		return;
 	}
 
 	if ((rq->rq_flags & RQF_ELV) || !blk_mq_get_budget_and_tag(rq)) {
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
@@ -2683,7 +2684,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 		return BLK_STS_OK;
 	}
 
@@ -3018,7 +3019,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	hctx = rq->mq_hctx;
 	if ((rq->rq_flags & RQF_ELV) ||
 	    (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
-		blk_mq_insert_request(rq, false);
+		blk_mq_insert_request(rq, 0);
 		blk_mq_run_hw_queue(hctx, true);
 	} else {
 		blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5d3761c50063..273eee00524b 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -36,6 +36,9 @@ enum {
 	BLK_MQ_TAG_MAX		= BLK_MQ_NO_TAG - 1,
 };
 
+typedef unsigned int __bitwise blk_insert_t;
+#define BLK_MQ_INSERT_AT_HEAD		((__force blk_insert_t)0x01)
+
 void blk_mq_submit_bio(struct bio *bio);
 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie,
 		struct io_comp_batch *iob, unsigned int flags);
-- cgit v1.2.3-59-g8ed1b
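The blk_insert_t typedef added above follows the kernel's usual sparse idiom: to the compiler it is a plain unsigned int, but sparse treats a __bitwise type as distinct, so mixing it with bare integers without a __force cast gets flagged. That is what makes head insertions both greppable and type-checked. On the caller side the conversion is a one-liner, as in blk_execute_rq_nowait() above:

	/* bool at_head becomes an explicit, searchable flag */
	blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);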
From 2b5976134bfbc753dec6281da0890c5f194c00c9 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:55 +0200
Subject: blk-mq: pass a flags argument to blk_mq_request_bypass_insert

Replace the boolean at_head argument with the same flags that are
already passed to blk_mq_insert_request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-19-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-flush.c |  2 +-
 block/blk-mq.c    | 18 +++++++++---------
 block/blk-mq.h    |  2 +-
 3 files changed, 11 insertions(+), 11 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 015982bd2f7c..1d3af17619de 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -428,7 +428,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_request_bypass_insert(rq, 0);
 		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ba64c4621e29..ff74559d7da1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1447,7 +1447,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		if (rq->rq_flags & RQF_DONTPREP) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
@@ -2457,17 +2457,17 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
- * @at_head: true if the request should be inserted at the head of the list.
+ * @flags: BLK_MQ_INSERT_*
  *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
 */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
+void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
-	if (at_head)
+	if (flags & BLK_MQ_INSERT_AT_HEAD)
 		list_add(&rq->queuelist, &hctx->dispatch);
 	else
 		list_add_tail(&rq->queuelist, &hctx->dispatch);
 	spin_unlock(&hctx->lock);
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		 * and it is added to the scheduler queue, there is no chance to
 		 * dispatch it given we prioritize requests in hctx->dispatch.
 		 */
-		blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
+		blk_mq_request_bypass_insert(rq, flags);
 	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
@@ -2549,7 +2549,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		 * Simply queue flush rq to the front of hctx->dispatch so that
 		 * intensive flush workloads can benefit in case of NCQ HW.
 		 */
-		blk_mq_request_bypass_insert(rq, true);
+		blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
 	} else if (q->elevator) {
 		LIST_HEAD(list);
 
@@ -2670,7 +2670,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		break;
 	case BLK_STS_RESOURCE:
 	case BLK_STS_DEV_RESOURCE:
-		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_request_bypass_insert(rq, 0);
 		blk_mq_run_hw_queue(hctx, false);
 		break;
 	default:
 		blk_mq_end_request(rq, ret);
@@ -2718,7 +2718,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 			blk_mq_run_hw_queue(hctx, false);
 			goto out;
 		default:
 			blk_mq_end_request(rq, ret);
@@ -2837,7 +2837,7 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 			if (list_empty(list))
 				blk_mq_run_hw_queue(hctx, false);
 			goto out;
 		default:
 			blk_mq_end_request(rq, ret);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 273eee00524b..bb16c0a54411 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -67,7 +67,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 /*
  * Internal helpers for request insertion into sw queues
 */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head);
+void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);
 
-- cgit v1.2.3-59-g8ed1b
From b12e5c6c755ae8bec44723f77f037873e3d08021 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 13 Apr 2023 08:40:57 +0200
Subject: blk-mq: pass a flags argument to blk_mq_add_to_requeue_list

Replace the boolean at_head argument with the same flags that are
already passed to blk_mq_insert_request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-21-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-flush.c | 4 ++--
 block/blk-mq.c    | 6 +++---
 block/blk-mq.h    | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'block/blk-mq.h')

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 1d3af17619de..00dd2f61312d 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -188,7 +188,7 @@ static void blk_flush_complete_seq(struct request *rq,
 
 	case REQ_FSEQ_DATA:
 		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
-		blk_mq_add_to_requeue_list(rq, true);
+		blk_mq_add_to_requeue_list(rq, BLK_MQ_INSERT_AT_HEAD);
 		blk_mq_kick_requeue_list(q);
 		break;
 
@@ -346,7 +346,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	smp_wmb();
 	req_ref_set(flush_rq, 1);
-	blk_mq_add_to_requeue_list(flush_rq, false);
+	blk_mq_add_to_requeue_list(flush_rq, BLK_MQ_INSERT_AT_HEAD);
 	blk_mq_kick_requeue_list(q);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6c3db1a15dad..1e35c829bddd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1419,7 +1419,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 	/* this request will be re-inserted to io scheduler queue */
 	blk_mq_sched_requeue_request(rq);
 
-	blk_mq_add_to_requeue_list(rq, true);
+	blk_mq_add_to_requeue_list(rq, BLK_MQ_INSERT_AT_HEAD);
 
 	if (kick_requeue_list)
 		blk_mq_kick_requeue_list(q);
@@ -1464,7 +1464,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 	blk_mq_run_hw_queues(q, false);
 }
 
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
@@ -1476,7 +1476,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
 
 	spin_lock_irqsave(&q->requeue_lock, flags);
-	if (at_head) {
+	if (insert_flags & BLK_MQ_INSERT_AT_HEAD) {
 		rq->rq_flags |= RQF_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->requeue_list);
 	} else {
 		list_add_tail(&rq->queuelist, &q->requeue_list);
 	}
diff --git a/block/blk-mq.h b/block/blk-mq.h
index bb16c0a54411..f882677ff106 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -47,7 +47,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
 			     unsigned int);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
-- cgit v1.2.3-59-g8ed1b
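Taken together, the series leaves the internal insertion interface in block/blk-mq.h looking roughly like this (signatures collected from the final state of the hunks above; a summary sketch, not a verbatim copy of the header):

	/* sparse-checked flag type shared by all three insertion paths */
	typedef unsigned int __bitwise blk_insert_t;
	#define BLK_MQ_INSERT_AT_HEAD	((__force blk_insert_t)0x01)

	void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);
	void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags);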