about summary refs log tree commit diff stats homepage
diff options
context:
space:
mode:
author	Jens Axboe <axboe@kernel.dk>	2018-11-08 09:03:51 -0700
committer	Jens Axboe <axboe@kernel.dk>	2018-11-08 10:24:23 -0700
commit	ae8799125d565c798e49dcab4bf182dbfc483524 (patch)
tree	9cf36f53aad46a716d8b7476da384823d82e9e38
parent	blk-mq-tag: change busy_iter_fn to return whether to continue or not (diff)
download	wireguard-linux-ae8799125d565c798e49dcab4bf182dbfc483524.tar.xz
	wireguard-linux-ae8799125d565c798e49dcab4bf182dbfc483524.zip
blk-mq: provide a helper to check if a queue is busy
Returns true if the queue currently has requests pending, false if not.

DM can use this to replace the atomic_inc/dec they do per device to see
if a device is busy.

Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	block/blk-mq.c	26
-rw-r--r--	include/linux/blk-mq.h	2
2 files changed, 28 insertions, 0 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4a622c832b31..4880e13e2394 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -790,6 +790,32 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);
+static bool blk_mq_check_busy(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ void *priv, bool reserved)
+{
+ /*
+ * If we find a request, we know the queue is busy. Return false
+ * to stop the iteration.
+ */
+ if (rq->q == hctx->queue) {
+ bool *busy = priv;
+
+ *busy = true;
+ return false;
+ }
+
+ return true;
+}
+
+bool blk_mq_queue_busy(struct request_queue *q)
+{
+ bool busy = false;
+
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_busy, &busy);
+ return busy;
+}
+EXPORT_SYMBOL_GPL(blk_mq_queue_busy);
+
static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
req->rq_flags |= RQF_TIMED_OUT;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ff497dfcbbf9..929e8abc5535 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -250,6 +250,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+bool blk_mq_queue_busy(struct request_queue *q);
+
enum {
/* return when out of requests */
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),