path: root/drivers/scsi/scsi_lib.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2021-11-09 11:20:07 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-11-09 11:20:07 -0800
commit     3e28850cbd359bed841b832200f9fc208a9ef040 (patch)
tree       ef4e5b294f934f58fc08feb89d24291b71c01d4a /drivers/scsi/scsi_lib.c
parent     Merge tag 'for-5.16/bdev-size-2021-11-09' of git://git.kernel.dk/linux-block (diff)
parent     nvme: wait until quiesce is done (diff)
download   linux-dev-3e28850cbd359bed841b832200f9fc208a9ef040.tar.xz
           linux-dev-3e28850cbd359bed841b832200f9fc208a9ef040.zip
Merge tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - Set of fixes for the batched tag allocation (Ming, me)
 - add_disk() error handling fix (Luis)
 - Nested queue quiesce fixes (Ming)
 - Shared tags init error handling fix (Ye)
 - Misc cleanups (Jean, Ming, me)

* tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block:
  nvme: wait until quiesce is done
  scsi: make sure that request queue queiesce and unquiesce balanced
  scsi: avoid to quiesce sdev->request_queue two times
  blk-mq: add one API for waiting until quiesce is done
  blk-mq: don't free tags if the tag_set is used by other device in queue initialztion
  block: fix device_add_disk() kobject_create_and_add() error handling
  block: ensure cached plug request matches the current queue
  block: move queue enter logic into blk_mq_submit_bio()
  block: make bio_queue_enter() fast-path available inline
  block: split request allocation components into helpers
  block: have plug stored requests hold references to the queue
  blk-mq: update hctx->nr_active in blk_mq_end_request_batch()
  blk-mq: add RQF_ELV debug entry
  blk-mq: only try to run plug merge if request has same queue with incoming bio
  block: move RQF_ELV setting into allocators
  dm: don't stop request queue after the dm device is suspended
  block: replace always false argument with 'false'
  block: assign correct tag before doing prefetch of request
  blk-mq: fix redundant check of !e expression
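The SCSI quiesce fixes in this pull hinge on one idea: guard blk_mq_quiesce_queue*() and blk_mq_unquiesce_queue() with a single atomic flag so the calls stay strictly balanced, and have callers that need a full quiesce wait via blk_mq_wait_quiesce_done() when someone else already stopped the queue (see the scsi_stop_queue()/scsi_start_queue() helpers in the diff below). The following is only a minimal userspace sketch of that balancing pattern, using C11 atomics in place of the kernel's cmpxchg(); toy_stop_queue(), toy_start_queue() and the counters are invented for this illustration and are not kernel APIs.

/*
 * Illustrative userspace sketch (not kernel code): a single atomic flag
 * ensures that repeated stop/start calls translate into exactly one
 * quiesce and one unquiesce, no matter how often callers repeat them.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int queue_stopped;                /* plays the role of sdev->queue_stopped */
static int quiesce_calls, unquiesce_calls;

static void toy_quiesce(void)   { quiesce_calls++; }
static void toy_unquiesce(void) { unquiesce_calls++; }

static void toy_stop_queue(void)
{
	int expected = 0;

	/* Only the 0 -> 1 transition actually quiesces the queue. */
	if (atomic_compare_exchange_strong(&queue_stopped, &expected, 1))
		toy_quiesce();
}

static void toy_start_queue(void)
{
	int expected = 1;

	/* Only the 1 -> 0 transition actually unquiesces the queue. */
	if (atomic_compare_exchange_strong(&queue_stopped, &expected, 0))
		toy_unquiesce();
}

int main(void)
{
	/* Repeated stop/start calls stay balanced. */
	toy_stop_queue();
	toy_stop_queue();
	toy_start_queue();
	toy_start_queue();

	printf("quiesce=%d unquiesce=%d\n", quiesce_calls, unquiesce_calls);
	return 0;
}

With the flag, the second toy_stop_queue() and the second toy_start_queue() become no-ops, so the sketch prints quiesce=1 unquiesce=1. In the patch below the same role is played by sdev->queue_stopped, and scsi_stop_queue() additionally calls blk_mq_wait_quiesce_done() so a non-nowait caller still observes a fully quiesced queue even when another caller set the flag first.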
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c  62
1 file changed, 40 insertions(+), 22 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1344553afe70..b731c2983515 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2665,6 +2665,40 @@ scsi_target_resume(struct scsi_target *starget)
}
EXPORT_SYMBOL(scsi_target_resume);
+static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
+{
+ if (scsi_device_set_state(sdev, SDEV_BLOCK))
+ return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
+
+ return 0;
+}
+
+void scsi_start_queue(struct scsi_device *sdev)
+{
+ if (cmpxchg(&sdev->queue_stopped, 1, 0))
+ blk_mq_unquiesce_queue(sdev->request_queue);
+}
+
+static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
+{
+ /*
+ * The atomic variable ->queue_stopped ensures that calls to
+ * blk_mq_quiesce_queue*() stay balanced with blk_mq_unquiesce_queue().
+ *
+ * However, we still need to wait until quiesce is done in case
+ * the queue has already been stopped.
+ */
+ if (!cmpxchg(&sdev->queue_stopped, 0, 1)) {
+ if (nowait)
+ blk_mq_quiesce_queue_nowait(sdev->request_queue);
+ else
+ blk_mq_quiesce_queue(sdev->request_queue);
+ } else {
+ if (!nowait)
+ blk_mq_wait_quiesce_done(sdev->request_queue);
+ }
+}
+
/**
* scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
* @sdev: device to block
@@ -2681,24 +2715,16 @@ EXPORT_SYMBOL(scsi_target_resume);
*/
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
- struct request_queue *q = sdev->request_queue;
- int err = 0;
-
- err = scsi_device_set_state(sdev, SDEV_BLOCK);
- if (err) {
- err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
-
- if (err)
- return err;
- }
+ int ret = __scsi_internal_device_block_nowait(sdev);
/*
* The device has transitioned to SDEV_BLOCK. Stop the
* block layer from calling the midlayer with this device's
* request queue.
*/
- blk_mq_quiesce_queue_nowait(q);
- return 0;
+ if (!ret)
+ scsi_stop_queue(sdev, true);
+ return ret;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
@@ -2719,25 +2745,17 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
*/
static int scsi_internal_device_block(struct scsi_device *sdev)
{
- struct request_queue *q = sdev->request_queue;
int err;
mutex_lock(&sdev->state_mutex);
- err = scsi_internal_device_block_nowait(sdev);
+ err = __scsi_internal_device_block_nowait(sdev);
if (err == 0)
- blk_mq_quiesce_queue(q);
+ scsi_stop_queue(sdev, false);
mutex_unlock(&sdev->state_mutex);
return err;
}
-void scsi_start_queue(struct scsi_device *sdev)
-{
- struct request_queue *q = sdev->request_queue;
-
- blk_mq_unquiesce_queue(q);
-}
-
/**
* scsi_internal_device_unblock_nowait - resume a device after a block request
* @sdev: device to resume