author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-09 18:54:06 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-09 18:54:06 -0700
commit	1c54fc1efe6922b4e7ffd591739d72050976ccd6 (patch)
tree	0f7f0eaa91fa06bba11da240915eb6a4040b482a /drivers/scsi/scsi_lib.c
parent	Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input (diff)
parent	Merge remote-tracking branch 'scsi-queue/drivers-for-3.16' into for-linus (diff)
download	linux-dev-1c54fc1efe6922b4e7ffd591739d72050976ccd6.tar.xz
	linux-dev-1c54fc1efe6922b4e7ffd591739d72050976ccd6.zip
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This patch consists of the usual driver updates (qla2xxx, qla4xxx,
  lpfc, be2iscsi, fnic, ufs, NCR5380)

  The NCR5380 is the addition to maintained status of a long neglected
  driver for older hardware.  In addition there are a lot of minor fixes
  and cleanups and some more updates to make scsi mq ready"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (130 commits)
  include/scsi/osd_protocol.h: remove unnecessary __constant
  mvsas: Recognise device/subsystem 9485/9485 as 88SE9485
  Revert "be2iscsi: Fix processing cqe for cxn whose endpoint is freed"
  mptfusion: fix msgContext in mptctl_hp_hostinfo
  acornscsi: remove linked command support
  scsi/NCR5380: dprintk macro
  fusion: Remove use of DEF_SCSI_QCMD
  fusion: Add free msg frames to the head, not tail of list
  mpt2sas: Add free smids to the head, not tail of list
  mpt2sas: Remove use of DEF_SCSI_QCMD
  mpt2sas: Remove uses of serial_number
  mpt3sas: Remove use of DEF_SCSI_QCMD
  mpt3sas: Remove uses of serial_number
  qla2xxx: Use kmemdup instead of kmalloc + memcpy
  qla4xxx: Use kmemdup instead of kmalloc + memcpy
  qla2xxx: fix incorrect debug printk
  be2iscsi: Bump the driver version
  be2iscsi: Fix processing cqe for cxn whose endpoint is freed
  be2iscsi: Fix destroy MCC-CQ before MCC-EQ is destroyed
  be2iscsi: Fix memory corruption in MBX path
  ...
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c	225
1 file changed, 90 insertions(+), 135 deletions(-)
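For orientation before reading the diff: the main structural change is that the old scsi_end_request() helper is removed and its leftover-bytes handling is folded directly into scsi_io_completion() using two labels, requeue and next_command. Below is a minimal userspace sketch of that control flow, not kernel code; every helper is a stub invented for illustration, and the sense-data handling plus the ACTION_* switch are elided.

/*
 * Minimal userspace model of the new scsi_io_completion() tail.
 * For illustration only: all helpers are stubs.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stub: returns true if the request still has bytes left after completion. */
static bool blk_end_request_stub(int error, int good_bytes)
{
	(void)error;
	return good_bytes < 512;	/* pretend the request is 512 bytes */
}

static bool scsi_noretry_cmd_stub(void)		{ return false; }
static void blk_end_request_all_stub(int error)	{ printf("kill remainder, error=%d\n", error); }
static void scsi_requeue_command_stub(void)	{ printf("requeue leftover bytes\n"); }
static void scsi_release_buffers_stub(void)	{ printf("release buffers\n"); }
static void scsi_next_command_stub(void)	{ printf("start next command\n"); }

static void io_completion_tail(int result, int error, int good_bytes)
{
	/* Finished every byte in the request: clean up and move on. */
	if (!blk_end_request_stub(error, good_bytes))
		goto next_command;

	/* Leftover bytes but no retries allowed: fail the remainder. */
	if (error && scsi_noretry_cmd_stub()) {
		blk_end_request_all_stub(error);
		goto next_command;
	}

	/* No error yet leftover bytes: put the remainder back on the queue. */
	if (result == 0)
		goto requeue;

	/* ... sense-data evaluation and the ACTION_* switch would go here ... */
	return;

requeue:
	scsi_release_buffers_stub();
	scsi_requeue_command_stub();
	return;

next_command:
	scsi_release_buffers_stub();
	scsi_next_command_stub();
}

int main(void)
{
	io_completion_tail(0, 0, 4096);	/* whole request done */
	io_completion_tail(0, 0, 256);	/* partial, gets requeued */
	return 0;
}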
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a0c95cac91f0..be0d5fad999d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -512,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
scsi_run_queue(sdev->request_queue);
}
-static void __scsi_release_buffers(struct scsi_cmnd *, int);
-
-/*
- * Function: scsi_end_request()
- *
- * Purpose: Post-processing of completed commands (usually invoked at end
- * of upper level post-processing and scsi_io_completion).
- *
- * Arguments: cmd - command that is complete.
- * error - 0 if I/O indicates success, < 0 for I/O error.
- * bytes - number of bytes of completed I/O
- * requeue - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns: cmd if requeue required, NULL otherwise.
- *
- * Notes: This is called for block device requests in order to
- * mark some number of sectors as complete.
- *
- * We are guaranteeing that the request queue will be goosed
- * at some point during this call.
- * Notes: If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
- int bytes, int requeue)
-{
- struct request_queue *q = cmd->device->request_queue;
- struct request *req = cmd->request;
-
- /*
- * If there are blocks left over at the end, set up the command
- * to queue the remainder of them.
- */
- if (blk_end_request(req, error, bytes)) {
- /* kill remainder if no retrys */
- if (error && scsi_noretry_cmd(cmd))
- blk_end_request_all(req, error);
- else {
- if (requeue) {
- /*
- * Bleah. Leftovers again. Stick the
- * leftovers in the front of the
- * queue, and goose the queue again.
- */
- scsi_release_buffers(cmd);
- scsi_requeue_command(q, cmd);
- cmd = NULL;
- }
- return cmd;
- }
- }
-
- /*
- * This will goose the queue request function at the end, so we don't
- * need to worry about launching another command.
- */
- __scsi_release_buffers(cmd, 0);
- scsi_next_command(cmd);
- return NULL;
-}
-
static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
unsigned int index;
@@ -625,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}
-static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
-{
-
- if (cmd->sdb.table.nents)
- scsi_free_sgtable(&cmd->sdb);
-
- memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
- if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
- struct scsi_data_buffer *bidi_sdb =
- cmd->request->next_rq->special;
- scsi_free_sgtable(bidi_sdb);
- kmem_cache_free(scsi_sdb_cache, bidi_sdb);
- cmd->request->next_rq->special = NULL;
- }
-
- if (scsi_prot_sg_count(cmd))
- scsi_free_sgtable(cmd->prot_sdb);
-}
-
/*
* Function: scsi_release_buffers()
*
- * Purpose: Completion processing for block device I/O requests.
+ * Purpose: Free resources allocated for a scsi_command.
*
* Arguments: cmd - command that we are bailing.
*
@@ -659,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
* Notes: In the event that an upper level driver rejects a
* command, we must release resources allocated during
* the __init_io() function. Primarily this would involve
- * the scatter-gather table, and potentially any bounce
- * buffers.
+ * the scatter-gather table.
*/
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
- __scsi_release_buffers(cmd, 1);
+ if (cmd->sdb.table.nents)
+ scsi_free_sgtable(&cmd->sdb);
+
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+ if (scsi_prot_sg_count(cmd))
+ scsi_free_sgtable(cmd->prot_sdb);
}
EXPORT_SYMBOL(scsi_release_buffers);
+static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
+{
+ struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
+
+ scsi_free_sgtable(bidi_sdb);
+ kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+ cmd->request->next_rq->special = NULL;
+}
+
/**
* __scsi_error_from_host_byte - translate SCSI error code into errno
* @cmd: SCSI command (unused)
@@ -725,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
*
* Returns: Nothing
*
- * Notes: This function is matched in terms of capabilities to
- * the function that created the scatter-gather list.
- * In other words, if there are no bounce buffers
- * (the normal case for most drivers), we don't need
- * the logic to deal with cleaning up afterwards.
- *
- * We must call scsi_end_request(). This will finish off
- * the specified number of sectors. If we are done, the
- * command block will be released and the queue function
- * will be goosed. If we are not done then we have to
+ * Notes: We will finish off the specified number of sectors. If we
+ * are done, the command block will be released and the queue
+ * function will be goosed. If we are not done then we have to
* figure out what to do next:
*
* a) We can call scsi_requeue_command(). The request
@@ -743,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
* be used if we made forward progress, or if we want
* to switch from READ(10) to READ(6) for example.
*
- * b) We can call scsi_queue_insert(). The request will
+ * b) We can call __scsi_queue_insert(). The request will
* be put back on the queue and retried using the same
* command as before, possibly after a delay.
*
@@ -801,6 +726,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
req->next_rq->resid_len = scsi_in(cmd)->resid;
scsi_release_buffers(cmd);
+ scsi_release_bidi_buffers(cmd);
+
blk_end_request_all(req, 0);
scsi_next_command(cmd);
@@ -840,12 +767,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
/*
- * A number of bytes were successfully read. If there
- * are leftovers and there is some kind of error
- * (result != 0), retry the rest.
+ * If we finished all bytes in the request we are done now.
*/
- if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
- return;
+ if (!blk_end_request(req, error, good_bytes))
+ goto next_command;
+
+ /*
+ * Kill remainder if no retries.
+ */
+ if (error && scsi_noretry_cmd(cmd)) {
+ blk_end_request_all(req, error);
+ goto next_command;
+ }
+
+ /*
+ * If there had been no error, but we have leftover bytes in the
+ * request, just queue the command up again.
+ */
+ if (result == 0)
+ goto requeue;
error = __scsi_error_from_host_byte(cmd, result);
@@ -973,7 +913,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
switch (action) {
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
- scsi_release_buffers(cmd);
if (!(req->cmd_flags & REQ_QUIET)) {
if (description)
scmd_printk(KERN_INFO, cmd, "%s\n",
@@ -983,12 +922,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_print_sense("", cmd);
scsi_print_command(cmd);
}
- if (blk_end_request_err(req, error))
- scsi_requeue_command(q, cmd);
- else
- scsi_next_command(cmd);
- break;
+ if (!blk_end_request_err(req, error))
+ goto next_command;
+ /*FALLTHRU*/
case ACTION_REPREP:
+ requeue:
/* Unprep the request and put it back at the head of the queue.
* A new command will be prepared and issued.
*/
@@ -1004,6 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
break;
}
+ return;
+
+next_command:
+ scsi_release_buffers(cmd);
+ scsi_next_command(cmd);
}
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
@@ -1128,15 +1071,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd;
- int ret = scsi_prep_state_check(sdev, req);
-
- if (ret != BLKPREP_OK)
- return ret;
-
- cmd = scsi_get_cmd_from_req(sdev, req);
- if (unlikely(!cmd))
- return BLKPREP_DEFER;
+ struct scsi_cmnd *cmd = req->special;
/*
* BLOCK_PC requests may transfer data, in which case they must
@@ -1179,15 +1114,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
*/
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd;
- int ret = scsi_prep_state_check(sdev, req);
-
- if (ret != BLKPREP_OK)
- return ret;
+ struct scsi_cmnd *cmd = req->special;
if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
&& sdev->scsi_dh_data->scsi_dh->prep_fn)) {
- ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+ int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
if (ret != BLKPREP_OK)
return ret;
}
@@ -1197,16 +1128,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
*/
BUG_ON(!req->nr_phys_segments);
- cmd = scsi_get_cmd_from_req(sdev, req);
- if (unlikely(!cmd))
- return BLKPREP_DEFER;
-
memset(cmd->cmnd, 0, BLK_MAX_CDB);
return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);
-int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+static int
+scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
int ret = BLKPREP_OK;
@@ -1258,9 +1186,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
}
return ret;
}
-EXPORT_SYMBOL(scsi_prep_state_check);
-int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+static int
+scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
struct scsi_device *sdev = q->queuedata;
@@ -1291,18 +1219,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
return ret;
}
-EXPORT_SYMBOL(scsi_prep_return);
-int scsi_prep_fn(struct request_queue *q, struct request *req)
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
- int ret = BLKPREP_KILL;
+ struct scsi_cmnd *cmd;
+ int ret;
- if (req->cmd_type == REQ_TYPE_BLOCK_PC)
+ ret = scsi_prep_state_check(sdev, req);
+ if (ret != BLKPREP_OK)
+ goto out;
+
+ cmd = scsi_get_cmd_from_req(sdev, req);
+ if (unlikely(!cmd)) {
+ ret = BLKPREP_DEFER;
+ goto out;
+ }
+
+ if (req->cmd_type == REQ_TYPE_FS)
+ ret = scsi_cmd_to_driver(cmd)->init_command(cmd);
+ else if (req->cmd_type == REQ_TYPE_BLOCK_PC)
ret = scsi_setup_blk_pc_cmnd(sdev, req);
+ else
+ ret = BLKPREP_KILL;
+
+out:
return scsi_prep_return(q, req, ret);
}
-EXPORT_SYMBOL(scsi_prep_fn);
+
+static void scsi_unprep_fn(struct request_queue *q, struct request *req)
+{
+ if (req->cmd_type == REQ_TYPE_FS) {
+ struct scsi_cmnd *cmd = req->special;
+ struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
+
+ if (drv->uninit_command)
+ drv->uninit_command(cmd);
+ }
+}
/*
* scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
@@ -1723,6 +1677,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
return NULL;
blk_queue_prep_rq(q, scsi_prep_fn);
+ blk_queue_unprep_rq(q, scsi_unprep_fn);
blk_queue_softirq_done(q, scsi_softirq_done);
blk_queue_rq_timed_out(q, scsi_times_out);
blk_queue_lld_busy(q, scsi_lld_busy);
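The final hunks move the prep-state check and command allocation out of scsi_setup_blk_pc_cmnd()/scsi_setup_fs_cmnd() into scsi_prep_fn(), dispatch REQ_TYPE_FS requests to the upper-level driver's init_command() hook, and register a matching scsi_unprep_fn() via blk_queue_unprep_rq(). Below is a self-contained sketch of that prep/unprep shape; the types, constants and helpers are invented stand-ins, and the scsi_prep_return()-style error handling is omitted.

/*
 * Self-contained userspace model of the new prep/unprep split.
 * Not kernel code: everything declared here is a stand-in.
 */
#include <stddef.h>
#include <stdio.h>

enum cmd_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC, REQ_TYPE_OTHER };
enum { BLKPREP_OK, BLKPREP_KILL, BLKPREP_DEFER };

struct scsi_cmnd { const char *name; };

struct scsi_driver {
	int  (*init_command)(struct scsi_cmnd *cmd);
	void (*uninit_command)(struct scsi_cmnd *cmd);
};

struct request {
	enum cmd_type cmd_type;
	struct scsi_cmnd *special;	/* command attached by prep */
	struct scsi_driver *drv;	/* stand-in for scsi_cmd_to_driver() */
};

/* Stubs for the device-state check, command allocation and BLOCK_PC setup. */
static int prep_state_check(void) { return BLKPREP_OK; }

static struct scsi_cmnd *get_cmd(struct request *req)
{
	static struct scsi_cmnd c = { "cmd" };

	req->special = &c;
	return &c;
}

static int setup_blk_pc_cmnd(struct scsi_cmnd *cmd) { (void)cmd; return BLKPREP_OK; }

/* prep: state check and command allocation now happen here, once. */
static int prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret;

	ret = prep_state_check();
	if (ret != BLKPREP_OK)
		return ret;

	cmd = get_cmd(req);
	if (!cmd)
		return BLKPREP_DEFER;

	if (req->cmd_type == REQ_TYPE_FS)
		return req->drv->init_command(cmd);	/* upper-level driver hook */
	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		return setup_blk_pc_cmnd(cmd);
	return BLKPREP_KILL;
}

/* unprep: gives the upper-level driver a matching teardown callback. */
static void unprep_fn(struct request *req)
{
	if (req->cmd_type == REQ_TYPE_FS && req->drv->uninit_command)
		req->drv->uninit_command(req->special);
}

static int  fs_init(struct scsi_cmnd *cmd)   { printf("init %s\n", cmd->name); return BLKPREP_OK; }
static void fs_uninit(struct scsi_cmnd *cmd) { printf("uninit %s\n", cmd->name); }

int main(void)
{
	struct scsi_driver drv = { fs_init, fs_uninit };
	struct request req = { REQ_TYPE_FS, NULL, &drv };

	if (prep_fn(&req) == BLKPREP_OK)
		unprep_fn(&req);
	return 0;
}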