author		Ming Lei <ming.lei@redhat.com>	2020-12-03 09:26:37 +0800
committer	Jens Axboe <axboe@kernel.dk>	2020-12-07 20:30:19 -0700
commit		88c9979334aa5ff8c814ddf578f3113ed6c5ce8e (patch)
tree		a2b2694702a8e9a4beba7b38697852ed4b902484 /drivers/nvme/target/loop.c
parent		blk-mq: add new API of blk_mq_hctx_set_fq_lock_class (diff)
download	linux-dev-88c9979334aa5ff8c814ddf578f3113ed6c5ce8e.tar.xz
		linux-dev-88c9979334aa5ff8c814ddf578f3113ed6c5ce8e.zip
nvme-loop: use blk_mq_hctx_set_fq_lock_class to set loop's lock class
Set nvme-loop's lock class via blk_mq_hctx_set_fq_lock_class to avoid a possible lockdep recursive-locking report. This lets us remove the dynamically allocated lock class for each flush queue, and so finally avoid the horrible SCSI probe delay.

This approach does not cover the case in which one nvme-loop device is backed by another nvme-loop device. In practice, however, that setup is seldom used for testing, and even if someone did stack devices that way, the result would only be a recursive-locking false positive, not a real deadlock.

Tested-by: Kashyap Desai <kashyap.desai@broadcom.com>
Reported-by: Qian Cai <cai@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Sumit Saxena <sumit.saxena@broadcom.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Hannes Reinecke <hare@suse.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
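For context, the helper named above, blk_mq_hctx_set_fq_lock_class(), comes from the parent commit ("blk-mq: add new API of blk_mq_hctx_set_fq_lock_class"). A rough sketch of what it does, assuming this series' blk_flush_queue layout (hctx->fq, fq->mq_flush_lock); see the parent commit in block/blk-flush.c for the authoritative version:

	void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
			struct lock_class_key *key)
	{
		/* Only lockdep bookkeeping changes; runtime locking is untouched. */
		lockdep_set_class(&hctx->fq->mq_flush_lock, key);
	}

Because loop_hctx_fq_lock_key in the diff below is a single static key, every nvme-loop hw queue's flush lock ends up in one lockdep class, distinct from the default class used by the backing device's flush queue, which is what breaks the reported (false) recursion.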
Diffstat (limited to 'drivers/nvme/target/loop.c')
-rw-r--r--	drivers/nvme/target/loop.c	10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f6d81239be21..07806016c09d 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -211,6 +211,8 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
 			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
 }
 
+static struct lock_class_key loop_hctx_fq_lock_key;
+
 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
@@ -219,6 +221,14 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
 	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
 
+	/*
+	 * flush_end_io() can be called recursively for us, so use our own
+	 * lock class key for avoiding lockdep possible recursive locking,
+	 * then we can remove the dynamically allocated lock class for each
+	 * flush queue, that way may cause horrible boot delay.
+	 */
+	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);
+
 	hctx->driver_data = queue;
 	return 0;
 }
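The same pattern should apply to any other stacking blk-mq driver whose flush completion can re-enter flush handling on a backing queue. A minimal sketch, with hypothetical names (my_stacked_drv_*) standing in for a real driver, not code from this patch:

	/* One static key per driver: all of its flush locks share a lockdep class. */
	static struct lock_class_key my_stacked_drv_fq_lock_key;

	static int my_stacked_drv_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			unsigned int hctx_idx)
	{
		/* Re-key this hw queue's flush lock so lockdep sees a distinct class. */
		blk_mq_hctx_set_fq_lock_class(hctx, &my_stacked_drv_fq_lock_key);

		hctx->driver_data = data;	/* driver-specific; placeholder here */
		return 0;
	}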