Diffstat (limited to 'drivers/scsi/scsi_lib.c')
 drivers/scsi/scsi_lib.c | 198 +++++++++++++++++++++++++-----------------------
 1 file changed, 110 insertions(+), 88 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7c6dd6f75190..60c7a7d74852 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -293,21 +293,6 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
}
EXPORT_SYMBOL(__scsi_execute);
-/**
- * scsi_init_cmd_errh - Initialize cmd fields related to error handling.
- * @cmd: command that is ready to be queued.
- *
- * This function has the job of initializing a number of fields related to error
- * handling. Typically this will be called once for each command, as required.
- */
-static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
-{
- scsi_set_resid(cmd, 0);
- memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- if (cmd->cmd_len == 0)
- cmd->cmd_len = scsi_command_size(cmd->cmnd);
-}
-
/*
* Wake up the error handler if necessary. Avoid as follows that the error
* handler is not woken up if host in-flight requests number ==
@@ -530,7 +515,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
}
}
-static void scsi_free_sgtables(struct scsi_cmnd *cmd)
+void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table,
@@ -539,6 +524,7 @@ static void scsi_free_sgtables(struct scsi_cmnd *cmd)
sg_free_table_chained(&cmd->prot_sdb->table,
SCSI_INLINE_PROT_SG_CNT);
}
+EXPORT_SYMBOL_GPL(scsi_free_sgtables);
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
@@ -549,10 +535,27 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_run_queue_async(struct scsi_device *sdev)
{
if (scsi_target(sdev)->single_lun ||
- !list_empty(&sdev->host->starved_list))
+ !list_empty(&sdev->host->starved_list)) {
kblockd_schedule_work(&sdev->requeue_work);
- else
- blk_mq_run_hw_queues(sdev->request_queue, true);
+ } else {
+ /*
+ * The smp_mb() in sbitmap_queue_clear(), or the barrier implied
+ * by .end_io, orders the write to .device_busy in
+ * scsi_device_unbusy() before the read of sdev->restarts below.
+ */
+ int old = atomic_read(&sdev->restarts);
+
+ /*
+ * ->restarts has to stay non-zero if new budget contention
+ * has occurred in the meantime.
+ *
+ * There is no need to run the queue when either another re-run
+ * wins the race to clear ->restarts or new budget contention
+ * occurs: in both cases someone else will run the queue.
+ */
+ if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
+ blk_mq_run_hw_queues(sdev->request_queue, true);
+ }
}
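The read-then-cmpxchg added above avoids both lost wakeups and redundant queue
runs: only the completion that successfully clears a non-zero ->restarts
re-runs the hardware queues. A minimal user-space sketch of the same pattern,
using C11 atomics instead of the kernel's atomic_t (names here are
illustrative, not kernel APIs):

	#include <stdatomic.h>

	static atomic_int restarts;

	static void run_hw_queues(void)
	{
		/* stand-in for blk_mq_run_hw_queues() */
	}

	static void run_queue_async(void)
	{
		int old = atomic_load(&restarts);

		/*
		 * Act only if contention was recorded and nothing changed
		 * between the load and the exchange. If the exchange fails,
		 * either a concurrent caller already cleared the counter
		 * (and runs the queue), or a new budget miss bumped it (and
		 * a later completion will run the queue).
		 */
		if (old && atomic_compare_exchange_strong(&restarts, &old, 0))
			run_hw_queues();
	}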
/* Returns false when no more bytes to process, true if there are more */
@@ -652,6 +655,23 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
scsi_mq_requeue_cmd(cmd);
}
+static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
+{
+ struct request *req = cmd->request;
+ unsigned long wait_for;
+
+ if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ return false;
+
+ wait_for = (cmd->allowed + 1) * req->timeout;
+ if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
+ wait_for/HZ);
+ return true;
+ }
+ return false;
+}
+
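The new helper centralizes the total-runtime check that the rest of this patch
removes from scsi_io_completion_action() and scsi_softirq_done(): a command
gets (cmd->allowed + 1) * req->timeout of wall-clock time from allocation. For
example, 5 allowed retries with a 30-second request timeout yields
(5 + 1) * 30 = 180 seconds before the command is failed regardless of its
disposition; SCSI_CMD_RETRIES_NO_LIMIT opts out of the check entirely.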
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
@@ -660,7 +680,6 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
int level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
- unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -758,6 +777,15 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
/* See SSC3rXX or current. */
action = ACTION_FAIL;
break;
+ case DATA_PROTECT:
+ action = ACTION_FAIL;
+ if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
+ (sshdr.asc == 0x55 &&
+ (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
+ /* Insufficient zone resources */
+ blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
+ }
+ break;
default:
action = ACTION_FAIL;
break;
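The new DATA_PROTECT branch still fails the command, but for the listed
additional sense codes, which report variants of an "insufficient zone
resources" condition on zoned devices, it returns BLK_STS_ZONE_OPEN_RESOURCE
instead of a generic error, so the submitter can distinguish a transient
shortage of open-zone resources from a hard failure and retry once other
zones have been finished or closed.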
@@ -765,8 +793,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
} else
action = ACTION_FAIL;
- if (action != ACTION_FAIL &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
+ if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
action = ACTION_FAIL;
switch (action) {
@@ -795,7 +822,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
}
if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
return;
- /*FALLTHRU*/
+ fallthrough;
case ACTION_REPREP:
scsi_io_completion_reprep(cmd, q);
break;
@@ -966,7 +993,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
}
/**
- * scsi_init_io - SCSI I/O initialization function.
+ * scsi_alloc_sgtables - allocate S/G tables for a command
* @cmd: command descriptor we wish to initialize
*
* Returns:
@@ -974,7 +1001,7 @@ static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
* * BLK_STS_RESOURCE - if the failure is retryable
* * BLK_STS_IOERR - if the failure is fatal
*/
-blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
+blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct request *rq = cmd->request;
@@ -1066,7 +1093,7 @@ out_free_sgtables:
scsi_free_sgtables(cmd);
return ret;
}
-EXPORT_SYMBOL(scsi_init_io);
+EXPORT_SYMBOL(scsi_alloc_sgtables);
/**
* scsi_initialize_rq - initialize struct scsi_cmnd partially
@@ -1154,7 +1181,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
* submit a request without an attached bio.
*/
if (req->bio) {
- blk_status_t ret = scsi_init_io(cmd);
+ blk_status_t ret = scsi_alloc_sgtables(cmd);
if (unlikely(ret != BLK_STS_OK))
return ret;
} else {
@@ -1164,58 +1191,16 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
}
cmd->cmd_len = scsi_req(req)->cmd_len;
+ if (cmd->cmd_len == 0)
+ cmd->cmd_len = scsi_command_size(cmd->cmnd);
cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = scsi_req(req)->retries;
return BLK_STS_OK;
}
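The cmd_len defaulting added above moves here from the deleted
scsi_init_cmd_errh(). When the submitter leaves cmd_len at zero,
scsi_command_size() derives the CDB length from the opcode's group code. A
rough sketch of that lookup for fixed-size CDBs, assuming the standard
group-size table (variable-length CDBs are handled separately in the real
implementation):

	/* The top three opcode bits select one of the fixed CDB sizes. */
	static const unsigned char cdb_group_size[8] = {
		6, 10, 10, 12, 16, 12, 10, 10
	};

	static unsigned int fixed_cdb_size(unsigned char opcode)
	{
		return cdb_group_size[(opcode >> 5) & 7];
	}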
-/*
- * Setup a normal block command. These are simple request from filesystems
- * that still need to be translated to SCSI CDBs from the ULD.
- */
-static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev,
- struct request *req)
-{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-
- if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
- blk_status_t ret = sdev->handler->prep_fn(sdev, req);
- if (ret != BLK_STS_OK)
- return ret;
- }
-
- cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
- memset(cmd->cmnd, 0, BLK_MAX_CDB);
- return scsi_cmd_to_driver(cmd)->init_command(cmd);
-}
-
-static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
- struct request *req)
-{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
- blk_status_t ret;
-
- if (!blk_rq_bytes(req))
- cmd->sc_data_direction = DMA_NONE;
- else if (rq_data_dir(req) == WRITE)
- cmd->sc_data_direction = DMA_TO_DEVICE;
- else
- cmd->sc_data_direction = DMA_FROM_DEVICE;
-
- if (blk_rq_is_scsi(req))
- ret = scsi_setup_scsi_cmnd(sdev, req);
- else
- ret = scsi_setup_fs_cmnd(sdev, req);
-
- if (ret != BLK_STS_OK)
- scsi_free_sgtables(cmd);
-
- return ret;
-}
-
static blk_status_t
-scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
switch (sdev->sdev_state) {
case SDEV_OFFLINE:
@@ -1439,7 +1424,6 @@ static bool scsi_mq_lld_busy(struct request_queue *q)
static void scsi_softirq_done(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
int disposition;
INIT_LIST_HEAD(&cmd->eh_entry);
@@ -1449,13 +1433,8 @@ static void scsi_softirq_done(struct request *rq)
atomic_inc(&cmd->device->ioerr_cnt);
disposition = scsi_decide_disposition(cmd);
- if (disposition != SUCCESS &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
- scmd_printk(KERN_ERR, cmd,
- "timing out command, waited %lus\n",
- wait_for/HZ);
+ if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
disposition = SUCCESS;
- }
scsi_log_completion(cmd, disposition);
@@ -1563,7 +1542,7 @@ static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
sizeof(struct scatterlist);
}
-static blk_status_t scsi_mq_prep_fn(struct request *req)
+static blk_status_t scsi_prepare_cmd(struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
@@ -1575,6 +1554,10 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
cmd->request = req;
cmd->tag = req->tag;
cmd->prot_op = SCSI_PROT_NORMAL;
+ if (blk_rq_bytes(req))
+ cmd->sc_data_direction = rq_dma_dir(req);
+ else
+ cmd->sc_data_direction = DMA_NONE;
sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
cmd->sdb.table.sgl = sg;
@@ -1586,9 +1569,23 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
(struct scatterlist *)(cmd->prot_sdb + 1);
}
- blk_mq_start_request(req);
+ /*
+ * Special handling for passthrough commands, which don't go to the ULP
+ * at all:
+ */
+ if (blk_rq_is_scsi(req))
+ return scsi_setup_scsi_cmnd(sdev, req);
+
+ if (sdev->handler && sdev->handler->prep_fn) {
+ blk_status_t ret = sdev->handler->prep_fn(sdev, req);
+
+ if (ret != BLK_STS_OK)
+ return ret;
+ }
- return scsi_setup_cmnd(sdev, req);
+ cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
+ memset(cmd->cmnd, 0, BLK_MAX_CDB);
+ return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
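scsi_prepare_cmd() now absorbs the work of the deleted scsi_setup_cmnd() and
scsi_setup_fs_cmnd(), and the data-direction setup collapses the old
three-way branch: rq_dma_dir() from <linux/blk-mq.h> maps writes to
DMA_TO_DEVICE and everything else to DMA_FROM_DEVICE, with the DMA_NONE case
kept for zero-length requests:

	#define rq_dma_dir(rq) \
		(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)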
static void scsi_mq_done(struct scsi_cmnd *cmd)
@@ -1612,7 +1609,30 @@ static bool scsi_mq_get_budget(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
- return scsi_dev_queue_ready(q, sdev);
+ if (scsi_dev_queue_ready(q, sdev))
+ return true;
+
+ atomic_inc(&sdev->restarts);
+
+ /*
+ * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
+ * .restarts must be incremented before .device_busy is read because the
+ * code in scsi_run_queue_async() depends on the order of these operations.
+ */
+ smp_mb__after_atomic();
+
+ /*
+ * If all of this LUN's in-flight requests complete before we read
+ * .device_busy, .device_busy will be observed as zero and
+ * blk_mq_delay_run_hw_queues() below will dispatch this request
+ * soon. Otherwise, the completion of one of those requests will
+ * observe the .restarts flag and re-run the request queue to
+ * handle this request, see scsi_end_request().
+ */
+ if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
+ !scsi_device_blocked(sdev)))
+ blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
+ return false;
}
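The barrier pairing is the crux of the lockless restart scheme: the
budget-miss side above orders its ->restarts increment before reading
->device_busy, while the completion side (see the comment in
scsi_run_queue_async()) orders its ->device_busy update before reading
->restarts. A schematic of the two orderings, with the kernel plumbing
elided:

	/*
	 * CPU A (budget miss)                 CPU B (request completion)
	 *
	 * atomic_inc(&sdev->restarts);        write sdev->device_busy
	 * smp_mb__after_atomic();             smp_mb(); (sbitmap/.end_io)
	 * read sdev->device_busy;             read sdev->restarts;
	 *
	 * At least one CPU must observe the other's write: either CPU A
	 * sees ->device_busy == 0 and re-runs the queue itself, or CPU B
	 * sees the incremented ->restarts and re-runs the queue from the
	 * completion path. Either way the request is not left stranded.
	 */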
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1631,7 +1651,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
* commands.
*/
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
- ret = scsi_prep_state_check(sdev, req);
+ ret = scsi_device_state_check(sdev, req);
if (ret != BLK_STS_OK)
goto out_put_budget;
}
@@ -1643,13 +1663,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
goto out_dec_target_busy;
if (!(req->rq_flags & RQF_DONTPREP)) {
- ret = scsi_mq_prep_fn(req);
+ ret = scsi_prepare_cmd(req);
if (ret != BLK_STS_OK)
goto out_dec_host_busy;
req->rq_flags |= RQF_DONTPREP;
} else {
clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
- blk_mq_start_request(req);
}
cmd->flags &= SCMD_PRESERVED_FLAGS;
@@ -1658,9 +1677,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (bd->last)
cmd->flags |= SCMD_LAST;
- scsi_init_cmd_errh(cmd);
+ scsi_set_resid(cmd, 0);
+ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
cmd->scsi_done = scsi_mq_done;
+ blk_mq_start_request(req);
reason = scsi_dispatch_cmd(cmd);
if (reason) {
scsi_set_blocked(cmd, reason);
@@ -1891,6 +1912,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->flags |=
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
tag_set->driver_data = shost;
+ if (shost->host_tagset)
+ tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
return blk_mq_alloc_tag_set(tag_set);
}
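With shost->host_tagset set, BLK_MQ_F_TAG_HCTX_SHARED makes all hardware
queues draw from a single host-wide tag space, matching HBAs that share one
tag pool across their submission queues. A hypothetical LLD fragment opting
in (the example_* names are illustrative, not a real driver):

	static int example_probe(struct pci_dev *pdev)
	{
		struct Scsi_Host *shost;

		shost = scsi_host_alloc(&example_template, 0);
		if (!shost)
			return -ENOMEM;

		shost->nr_hw_queues = example_nr_queues(pdev);
		shost->host_tagset = 1;	/* share tags across hw queues */

		return scsi_add_host(shost, &pdev->dev);
	}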
@@ -1919,7 +1942,6 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
return sdev;
}
-EXPORT_SYMBOL_GPL(scsi_device_from_queue);
/**
* scsi_block_requests - Utility function used by low-level drivers to prevent