Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--   drivers/scsi/scsi_lib.c   82
1 file changed, 30 insertions(+), 52 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 06056e9ec333..7c6dd6f75190 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -52,7 +52,6 @@
#define SCSI_INLINE_SG_CNT 2
#endif
-static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);
@@ -390,7 +389,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_kick_queue(sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
-
+
scsi_device_put(sdev);
}
out:
@@ -1461,18 +1460,18 @@ static void scsi_softirq_done(struct request *rq)
scsi_log_completion(cmd, disposition);
switch (disposition) {
- case SUCCESS:
- scsi_finish_command(cmd);
- break;
- case NEEDS_RETRY:
- scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
- break;
- case ADD_TO_MLQUEUE:
- scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
- break;
- default:
- scsi_eh_scmd_add(cmd);
- break;
+ case SUCCESS:
+ scsi_finish_command(cmd);
+ break;
+ case NEEDS_RETRY:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
+ break;
+ case ADD_TO_MLQUEUE:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+ break;
+ default:
+ scsi_eh_scmd_add(cmd);
+ break;
}
}
@@ -1594,31 +1593,23 @@ static blk_status_t scsi_mq_prep_fn(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
+ if (unlikely(blk_should_fake_timeout(cmd->request->q)))
+ return;
if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
return;
trace_scsi_dispatch_cmd_done(cmd);
-
- /*
- * If the block layer didn't complete the request due to a timeout
- * injection, scsi must clear its internal completed state so that the
- * timeout handler will see it needs to escalate its own error
- * recovery.
- */
- if (unlikely(!blk_mq_complete_request(cmd->request)))
- clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
+ blk_mq_complete_request(cmd->request);
}
-static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
+static void scsi_mq_put_budget(struct request_queue *q)
{
- struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
atomic_dec(&sdev->device_busy);
}
-static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
+static bool scsi_mq_get_budget(struct request_queue *q)
{
- struct request_queue *q = hctx->queue;
struct scsi_device *sdev = q->queuedata;
return scsi_dev_queue_ready(q, sdev);
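For readability, this is how scsi_mq_done() reads once the hunk above is applied, assembled from the '+' and surrounding context lines; the inline comments are explanatory and not part of the commit:

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	/*
	 * A fake timeout injected by blk_should_fake_timeout() must leave
	 * SCMD_STATE_COMPLETE clear, so the timeout handler still sees the
	 * command as pending and escalates its own error recovery.
	 */
	if (unlikely(blk_should_fake_timeout(cmd->request->q)))
		return;
	/* Only the first completion wins; duplicate calls are ignored. */
	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
		return;
	trace_scsi_dispatch_cmd_done(cmd);
	blk_mq_complete_request(cmd->request);
}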
@@ -1685,7 +1676,7 @@ out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
- scsi_mq_put_budget(hctx);
+ scsi_mq_put_budget(q);
switch (ret) {
case BLK_STS_OK:
break;
@@ -1961,24 +1952,10 @@ void scsi_unblock_requests(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_unblock_requests);
-int __init scsi_init_queue(void)
-{
- scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
- sizeof(struct scsi_data_buffer),
- 0, 0, NULL);
- if (!scsi_sdb_cache) {
- printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
void scsi_exit_queue(void)
{
kmem_cache_destroy(scsi_sense_cache);
kmem_cache_destroy(scsi_sense_isadma_cache);
- kmem_cache_destroy(scsi_sdb_cache);
}
/**
@@ -2045,7 +2022,6 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
real_buffer[1] = data->medium_type;
real_buffer[2] = data->device_specific;
real_buffer[3] = data->block_descriptor_length;
-
cmd[0] = MODE_SELECT;
cmd[4] = len;
@@ -2131,7 +2107,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
if (scsi_sense_valid(sshdr)) {
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
(sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
- /*
+ /*
* Invalid command operation code
*/
sdev->use_10_for_ms = 0;
@@ -2140,7 +2116,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
}
}
- if(scsi_status_is_good(result)) {
+ if (scsi_status_is_good(result)) {
if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
(modepage == 6 || modepage == 8))) {
/* Initio breakage? */
@@ -2150,7 +2126,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
data->device_specific = 0;
data->longlba = 0;
data->block_descriptor_length = 0;
- } else if(use_10_for_ms) {
+ } else if (use_10_for_ms) {
data->length = buffer[0]*256 + buffer[1] + 2;
data->medium_type = buffer[2];
data->device_specific = buffer[3];
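The "+ 2" in data->length above follows from the MODE SENSE(10) header layout: bytes 0-1 carry the mode data length as a big-endian value that does not count the two length bytes themselves. A worked example, assuming a hypothetical device that returns 0x00 0x46 in those bytes:

#include <stdio.h>

int main(void)
{
	/* MODE SENSE(10) header: bytes 0-1 hold the mode data length,
	 * big-endian, excluding the two length bytes themselves. */
	unsigned char buffer[2] = { 0x00, 0x46 };	/* hypothetical device reply */
	int length = buffer[0] * 256 + buffer[1] + 2;	/* 0*256 + 70 + 2 */
	printf("total mode data: %d bytes\n", length);	/* prints 72 */
	return 0;
}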
@@ -2233,7 +2209,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
goto illegal;
}
break;
-
+
case SDEV_RUNNING:
switch (oldstate) {
case SDEV_CREATED:
@@ -2518,7 +2494,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
* (which must be a legal transition). When the device is in this
* state, only special requests will be accepted, all others will
* be deferred. Since special requests may also be requeued requests,
- * a successful return doesn't guarantee the device will be
+ * a successful return doesn't guarantee the device will be
* totally quiescent.
*
* Must be called with user context, may sleep.
@@ -2644,10 +2620,10 @@ int scsi_internal_device_block_nowait(struct scsi_device *sdev)
return err;
}
- /*
+ /*
* The device has transitioned to SDEV_BLOCK. Stop the
* block layer from calling the midlayer with this device's
- * request queue.
+ * request queue.
*/
blk_mq_quiesce_queue_nowait(q);
return 0;
@@ -2682,7 +2658,7 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
return err;
}
-
+
void scsi_start_queue(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
@@ -2841,8 +2817,10 @@ scsi_host_block(struct Scsi_Host *shost)
mutex_lock(&sdev->state_mutex);
ret = scsi_internal_device_block_nowait(sdev);
mutex_unlock(&sdev->state_mutex);
- if (ret)
+ if (ret) {
+ scsi_device_put(sdev);
break;
+ }
}
/*
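The scsi_host_block() change in the last hunk is a reference-count fix: shost_for_each_device() takes a reference on each device it hands to the loop body and only drops it when the iteration advances, so breaking out early leaves that reference held. A minimal sketch of the pattern, where do_work() is a hypothetical stand-in for the per-device step:

static int block_all_devices(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	int ret = 0;

	shost_for_each_device(sdev, shost) {
		ret = do_work(sdev);		/* hypothetical per-device step */
		if (ret) {
			scsi_device_put(sdev);	/* drop the iterator's reference */
			break;			/* early exit skips the macro's implicit put */
		}
	}
	return ret;
}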