Diffstat (limited to 'drivers/scsi/scsi_error.c')
-rw-r--r-- | drivers/scsi/scsi_error.c | 443
1 file changed, 248 insertions(+), 195 deletions(-)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index ae2fa170f6ad..6995c8979230 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -50,8 +50,6 @@ #include <asm/unaligned.h> -static void scsi_eh_done(struct scsi_cmnd *scmd); - /* * These should *probably* be handled by the host itself. * Since it is allowed to sleep, it probably should. @@ -60,8 +58,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd); #define HOST_RESET_SETTLE_TIME (10) static int scsi_eh_try_stu(struct scsi_cmnd *scmd); -static int scsi_try_to_abort_cmd(struct scsi_host_template *, - struct scsi_cmnd *); +static enum scsi_disposition scsi_try_to_abort_cmd(struct scsi_host_template *, + struct scsi_cmnd *); void scsi_eh_wakeup(struct Scsi_Host *shost) { @@ -116,13 +114,32 @@ static int scsi_host_eh_past_deadline(struct Scsi_Host *shost) return 1; } +static bool scsi_cmd_retry_allowed(struct scsi_cmnd *cmd) +{ + if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT) + return true; + + return ++cmd->retries <= cmd->allowed; +} + +static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd) +{ + struct scsi_device *sdev = cmd->device; + struct Scsi_Host *host = sdev->host; + + if (host->hostt->eh_should_retry_cmd) + return host->hostt->eh_should_retry_cmd(cmd); + + return true; +} + /** * scmd_eh_abort_handler - Handle command aborts * @work: command to be aborted. * * Note: this function must be called only for a command that has timed out. * Because the block layer marks a request as complete before it calls - * scsi_times_out(), a .scsi_done() call from the LLD for a command that has + * scsi_timeout(), a .scsi_done() call from the LLD for a command that has * timed out do not have any effect. Hence it is safe to call * scsi_finish_command() from this function. */ @@ -132,46 +149,71 @@ scmd_eh_abort_handler(struct work_struct *work) struct scsi_cmnd *scmd = container_of(work, struct scsi_cmnd, abort_work.work); struct scsi_device *sdev = scmd->device; - int rtn; + struct Scsi_Host *shost = sdev->host; + enum scsi_disposition rtn; + unsigned long flags; - if (scsi_host_eh_past_deadline(sdev->host)) { + if (scsi_host_eh_past_deadline(shost)) { SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, "eh timeout, not aborting\n")); - } else { - SCSI_LOG_ERROR_RECOVERY(3, + goto out; + } + + SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, "aborting command\n")); - rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); - if (rtn == SUCCESS) { - set_host_byte(scmd, DID_TIME_OUT); - if (scsi_host_eh_past_deadline(sdev->host)) { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_INFO, scmd, - "eh timeout, not retrying " - "aborted command\n")); - } else if (!scsi_noretry_cmd(scmd) && - (++scmd->retries <= scmd->allowed)) { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_WARNING, scmd, - "retry aborted command\n")); - scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); - return; - } else { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_WARNING, scmd, - "finish aborted command\n")); - scsi_finish_command(scmd); - return; - } - } else { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_INFO, scmd, - "cmd abort %s\n", - (rtn == FAST_IO_FAIL) ? - "not send" : "failed")); - } + rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); + if (rtn != SUCCESS) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "cmd abort %s\n", + (rtn == FAST_IO_FAIL) ? 
+ "not send" : "failed")); + goto out; + } + set_host_byte(scmd, DID_TIME_OUT); + if (scsi_host_eh_past_deadline(shost)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_INFO, scmd, + "eh timeout, not retrying " + "aborted command\n")); + goto out; + } + + spin_lock_irqsave(shost->host_lock, flags); + list_del_init(&scmd->eh_entry); + + /* + * If the abort succeeds, and there is no further + * EH action, clear the ->last_reset time. + */ + if (list_empty(&shost->eh_abort_list) && + list_empty(&shost->eh_cmd_q)) + if (shost->eh_deadline != -1) + shost->last_reset = 0; + + spin_unlock_irqrestore(shost->host_lock, flags); + + if (!scsi_noretry_cmd(scmd) && + scsi_cmd_retry_allowed(scmd) && + scsi_eh_should_retry_cmd(scmd)) { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "retry aborted command\n")); + scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); + } else { + SCSI_LOG_ERROR_RECOVERY(3, + scmd_printk(KERN_WARNING, scmd, + "finish aborted command\n")); + scsi_finish_command(scmd); } + return; + +out: + spin_lock_irqsave(shost->host_lock, flags); + list_del_init(&scmd->eh_entry); + spin_unlock_irqrestore(shost->host_lock, flags); scsi_eh_scmd_add(scmd); } @@ -203,6 +245,8 @@ scsi_abort_command(struct scsi_cmnd *scmd) spin_lock_irqsave(shost->host_lock, flags); if (shost->eh_deadline != -1 && !shost->last_reset) shost->last_reset = jiffies; + BUG_ON(!list_empty(&scmd->eh_entry)); + list_add_tail(&scmd->eh_entry, &shost->eh_abort_list); spin_unlock_irqrestore(shost->host_lock, flags); scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; @@ -222,7 +266,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) */ static void scsi_eh_reset(struct scsi_cmnd *scmd) { - if (!blk_rq_is_passthrough(scmd->request)) { + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) { struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); if (sdrv->eh_reset) sdrv->eh_reset(scmd); @@ -272,7 +316,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) } /** - * scsi_times_out - Timeout function for normal scsi commands. + * scsi_timeout - Timeout function for normal scsi commands. * @req: request that is timing out. * * Notes: @@ -281,7 +325,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) * normal completion function determines that the timer has already * fired, then it mustn't do anything. */ -enum blk_eh_timer_return scsi_times_out(struct request *req) +enum blk_eh_timer_return scsi_timeout(struct request *req) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); enum blk_eh_timer_return rtn = BLK_EH_DONE; @@ -290,6 +334,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) trace_scsi_dispatch_cmd_timeout(scmd); scsi_log_completion(scmd, TIMEOUT_ERROR); + atomic_inc(&scmd->device->iotmo_cnt); if (host->eh_deadline != -1 && !host->last_reset) host->last_reset = jiffies; @@ -419,14 +464,12 @@ static void scsi_report_sense(struct scsi_device *sdev, evt_type = SDEV_EVT_LUN_CHANGE_REPORTED; scsi_report_lun_change(sdev); sdev_printk(KERN_WARNING, sdev, - "Warning! Received an indication that the " "LUN assignments on this target have " "changed. The Linux SCSI layer does not " "automatically remap LUN assignments.\n"); } else if (sshdr->asc == 0x3f) sdev_printk(KERN_WARNING, sdev, - "Warning! Received an indication that the " - "operating parameters on this target have " + "Operating parameters on this target have " "changed. 
The Linux SCSI layer does not " "automatically adjust these parameters.\n"); @@ -440,8 +483,13 @@ static void scsi_report_sense(struct scsi_device *sdev, if (sshdr->asc == 0x29) { evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED; - sdev_printk(KERN_WARNING, sdev, - "Power-on or device reset occurred\n"); + /* + * Do not print message if it is an expected side-effect + * of runtime PM. + */ + if (!sdev->silence_suspend) + sdev_printk(KERN_WARNING, sdev, + "Power-on or device reset occurred\n"); } if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) { @@ -467,6 +515,11 @@ static void scsi_report_sense(struct scsi_device *sdev, } } +static inline void set_scsi_ml_byte(struct scsi_cmnd *cmd, u8 status) +{ + cmd->result = (cmd->result & 0xffff00ff) | (status << 8); +} + /** * scsi_check_sense - Examine scsi cmd sense * @scmd: Cmd to have sense checked. @@ -478,7 +531,7 @@ static void scsi_report_sense(struct scsi_device *sdev, * When a deferred error is detected the current command has * not been executed and needs retrying. */ -int scsi_check_sense(struct scsi_cmnd *scmd) +enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd) { struct scsi_device *sdev = scmd->device; struct scsi_sense_hdr sshdr; @@ -492,7 +545,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd) return NEEDS_RETRY; if (sdev->handler && sdev->handler->check_sense) { - int rc; + enum scsi_disposition rc; rc = sdev->handler->check_sense(sdev, &sshdr); if (rc != SCSI_RETURN_NOT_HANDLED) @@ -500,7 +553,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd) /* handler does not care. Drop down to default handling */ } - if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) + if (scmd->cmnd[0] == TEST_UNIT_READY && + scmd->submitter != SUBMITTED_BY_SCSI_ERROR_HANDLER) /* * nasty: for mid-layer issued TURs, we need to return the * actual sense data without any recovery attempt. For eh @@ -596,22 +650,22 @@ int scsi_check_sense(struct scsi_cmnd *scmd) case DATA_PROTECT: if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) { /* Thin provisioning hard threshold reached */ - set_host_byte(scmd, DID_ALLOC_FAILURE); + set_scsi_ml_byte(scmd, SCSIML_STAT_NOSPC); return SUCCESS; } - /* FALLTHROUGH */ + fallthrough; case COPY_ABORTED: case VOLUME_OVERFLOW: case MISCOMPARE: case BLANK_CHECK: - set_host_byte(scmd, DID_TARGET_FAILURE); + set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE); return SUCCESS; case MEDIUM_ERROR: if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */ sshdr.asc == 0x13 || /* AMNF DATA FIELD */ sshdr.asc == 0x14) { /* RECORD NOT FOUND */ - set_host_byte(scmd, DID_MEDIUM_ERROR); + set_scsi_ml_byte(scmd, SCSIML_STAT_MED_ERROR); return SUCCESS; } return NEEDS_RETRY; @@ -620,8 +674,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd) if (scmd->device->retry_hwerror) return ADD_TO_MLQUEUE; else - set_host_byte(scmd, DID_TARGET_FAILURE); - /* FALLTHROUGH */ + set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE); + fallthrough; case ILLEGAL_REQUEST: if (sshdr.asc == 0x20 || /* Invalid command operation code */ @@ -630,7 +684,7 @@ int scsi_check_sense(struct scsi_cmnd *scmd) sshdr.asc == 0x24 || /* Invalid field in cdb */ sshdr.asc == 0x26 || /* Parameter value invalid */ sshdr.asc == 0x27) { /* Write protected */ - set_host_byte(scmd, DID_TARGET_FAILURE); + set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE); } return SUCCESS; @@ -703,7 +757,7 @@ static void scsi_handle_queue_full(struct scsi_device *sdev) * don't allow for the possibility of retries here, and we are a lot * more restrictive about what we consider acceptable. 
*/ -static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) +static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd) { /* * first check the host byte, to see if there is anything in there @@ -722,41 +776,35 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) return FAILED; /* - * next, check the message byte. - */ - if (msg_byte(scmd->result) != COMMAND_COMPLETE) - return FAILED; - - /* * now, check the status byte to see if this indicates * anything special. */ - switch (status_byte(scmd->result)) { - case GOOD: + switch (get_status_byte(scmd)) { + case SAM_STAT_GOOD: scsi_handle_queue_ramp_up(scmd->device); - /* FALLTHROUGH */ - case COMMAND_TERMINATED: + fallthrough; + case SAM_STAT_COMMAND_TERMINATED: return SUCCESS; - case CHECK_CONDITION: + case SAM_STAT_CHECK_CONDITION: return scsi_check_sense(scmd); - case CONDITION_GOOD: - case INTERMEDIATE_GOOD: - case INTERMEDIATE_C_GOOD: + case SAM_STAT_CONDITION_MET: + case SAM_STAT_INTERMEDIATE: + case SAM_STAT_INTERMEDIATE_CONDITION_MET: /* * who knows? FIXME(eric) */ return SUCCESS; - case RESERVATION_CONFLICT: + case SAM_STAT_RESERVATION_CONFLICT: if (scmd->cmnd[0] == TEST_UNIT_READY) /* it is a success, we probed the device and * found it */ return SUCCESS; /* otherwise, we failed to send the command */ return FAILED; - case QUEUE_FULL: + case SAM_STAT_TASK_SET_FULL: scsi_handle_queue_full(scmd->device); - /* fall through */ - case BUSY: + fallthrough; + case SAM_STAT_BUSY: return NEEDS_RETRY; default: return FAILED; @@ -768,7 +816,7 @@ static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) * scsi_eh_done - Completion function for error handling. * @scmd: Cmd that is done. */ -static void scsi_eh_done(struct scsi_cmnd *scmd) +void scsi_eh_done(struct scsi_cmnd *scmd) { struct completion *eh_action; @@ -784,10 +832,10 @@ static void scsi_eh_done(struct scsi_cmnd *scmd) * scsi_try_host_reset - ask host adapter to reset itself * @scmd: SCSI cmd to send host reset. */ -static int scsi_try_host_reset(struct scsi_cmnd *scmd) +static enum scsi_disposition scsi_try_host_reset(struct scsi_cmnd *scmd) { unsigned long flags; - int rtn; + enum scsi_disposition rtn; struct Scsi_Host *host = scmd->device->host; struct scsi_host_template *hostt = host->hostt; @@ -814,10 +862,10 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd) * scsi_try_bus_reset - ask host to perform a bus reset * @scmd: SCSI cmd to send bus reset. */ -static int scsi_try_bus_reset(struct scsi_cmnd *scmd) +static enum scsi_disposition scsi_try_bus_reset(struct scsi_cmnd *scmd) { unsigned long flags; - int rtn; + enum scsi_disposition rtn; struct Scsi_Host *host = scmd->device->host; struct scsi_host_template *hostt = host->hostt; @@ -856,10 +904,10 @@ static void __scsi_report_device_reset(struct scsi_device *sdev, void *data) * timer on it, and set the host back to a consistent state prior to * returning. */ -static int scsi_try_target_reset(struct scsi_cmnd *scmd) +static enum scsi_disposition scsi_try_target_reset(struct scsi_cmnd *scmd) { unsigned long flags; - int rtn; + enum scsi_disposition rtn; struct Scsi_Host *host = scmd->device->host; struct scsi_host_template *hostt = host->hostt; @@ -887,9 +935,9 @@ static int scsi_try_target_reset(struct scsi_cmnd *scmd) * timer on it, and set the host back to a consistent state prior to * returning. 
*/ -static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) +static enum scsi_disposition scsi_try_bus_device_reset(struct scsi_cmnd *scmd) { - int rtn; + enum scsi_disposition rtn; struct scsi_host_template *hostt = scmd->device->host->hostt; if (!hostt->eh_device_reset_handler) @@ -918,8 +966,8 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) * if the device is temporarily unavailable (eg due to a * link down on FibreChannel) */ -static int scsi_try_to_abort_cmd(struct scsi_host_template *hostt, - struct scsi_cmnd *scmd) +static enum scsi_disposition +scsi_try_to_abort_cmd(struct scsi_host_template *hostt, struct scsi_cmnd *scmd) { if (!hostt->eh_abort_handler) return FAILED; @@ -941,7 +989,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd) * @scmd: SCSI command structure to hijack * @ses: structure to save restore information * @cmnd: CDB to send. Can be NULL if no new cmnd is needed - * @cmnd_size: size in bytes of @cmnd (must be <= BLK_MAX_CDB) + * @cmnd_size: size in bytes of @cmnd (must be <= MAX_COMMAND_SIZE) * @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored) * * This function is used to save a scsi command information before re-execution @@ -963,22 +1011,21 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, * command. */ ses->cmd_len = scmd->cmd_len; - ses->cmnd = scmd->cmnd; ses->data_direction = scmd->sc_data_direction; ses->sdb = scmd->sdb; ses->result = scmd->result; - ses->resid_len = scmd->req.resid_len; + ses->resid_len = scmd->resid_len; ses->underflow = scmd->underflow; ses->prot_op = scmd->prot_op; ses->eh_eflags = scmd->eh_eflags; scmd->prot_op = SCSI_PROT_NORMAL; scmd->eh_eflags = 0; - scmd->cmnd = ses->eh_cmnd; - memset(scmd->cmnd, 0, BLK_MAX_CDB); + memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd)); + memset(scmd->cmnd, 0, sizeof(scmd->cmnd)); memset(&scmd->sdb, 0, sizeof(scmd->sdb)); scmd->result = 0; - scmd->req.resid_len = 0; + scmd->resid_len = 0; if (sense_bytes) { scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, @@ -994,7 +1041,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, } else { scmd->sc_data_direction = DMA_NONE; if (cmnd) { - BUG_ON(cmnd_size > BLK_MAX_CDB); + BUG_ON(cmnd_size > sizeof(scmd->cmnd)); memcpy(scmd->cmnd, cmnd, cmnd_size); scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); } @@ -1027,11 +1074,11 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) * Restore original data */ scmd->cmd_len = ses->cmd_len; - scmd->cmnd = ses->cmnd; + memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd)); scmd->sc_data_direction = ses->data_direction; scmd->sdb = ses->sdb; scmd->result = ses->result; - scmd->req.resid_len = ses->resid_len; + scmd->resid_len = ses->resid_len; scmd->underflow = ses->underflow; scmd->prot_op = ses->prot_op; scmd->eh_eflags = ses->eh_eflags; @@ -1052,8 +1099,8 @@ EXPORT_SYMBOL(scsi_eh_restore_cmnd); * Return value: * SUCCESS or FAILED or NEEDS_RETRY */ -static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, - int cmnd_size, int timeout, unsigned sense_bytes) +static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd, + unsigned char *cmnd, int cmnd_size, int timeout, unsigned sense_bytes) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; @@ -1068,7 +1115,7 @@ retry: shost->eh_action = &done; scsi_log_send(scmd); - scmd->scsi_done = scsi_eh_done; + scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER; /* * Lock sdev->state_mutex to avoid that 
scsi_device_quiesce() can @@ -1089,12 +1136,13 @@ retry: if (sdev->sdev_state != SDEV_BLOCK) rtn = shost->hostt->queuecommand(shost, scmd); else - rtn = SCSI_MLQUEUE_DEVICE_BUSY; + rtn = FAILED; mutex_unlock(&sdev->state_mutex); if (rtn) { if (timeleft > stall_for) { scsi_eh_restore_cmnd(scmd, &ses); + timeleft -= stall_for; msleep(jiffies_to_msecs(stall_for)); goto retry; @@ -1160,14 +1208,15 @@ retry: * that we obtain it on our own. This function will *not* return until * the command either times out, or it completes. */ -static int scsi_request_sense(struct scsi_cmnd *scmd) +static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd) { return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0); } -static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn) +static enum scsi_disposition +scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn) { - if (!blk_rq_is_passthrough(scmd->request)) { + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) { struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); if (sdrv->eh_action) rtn = sdrv->eh_action(scmd, rtn); @@ -1218,7 +1267,7 @@ int scsi_eh_get_sense(struct list_head *work_q, { struct scsi_cmnd *scmd, *next; struct Scsi_Host *shost; - int rtn; + enum scsi_disposition rtn; /* * If SCSI_EH_ABORT_SCHEDULED has been set, it is timeout IO, @@ -1237,7 +1286,7 @@ int scsi_eh_get_sense(struct list_head *work_q, current->comm)); break; } - if (status_byte(scmd->result) != CHECK_CONDITION) + if (!scsi_status_is_check_condition(scmd->result)) /* * don't request sense if there's no check condition * status because the error we're processing isn't one @@ -1264,11 +1313,18 @@ int scsi_eh_get_sense(struct list_head *work_q, * upper level. */ if (rtn == SUCCESS) - /* we don't want this command reissued, just - * finished with the sense data, so set - * retries to the max allowed to ensure it - * won't get reissued */ - scmd->retries = scmd->allowed; + /* + * We don't want this command reissued, just finished + * with the sense data, so set retries to the max + * allowed to ensure it won't get reissued. If the user + * has requested infinite retries, we also want to + * finish this command, so force completion by setting + * retries and allowed to the same value. 
+ */ + if (scmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT) + scmd->retries = scmd->allowed = 1; + else + scmd->retries = scmd->allowed; else if (rtn != NEEDS_RETRY) continue; @@ -1289,7 +1345,8 @@ EXPORT_SYMBOL_GPL(scsi_eh_get_sense); static int scsi_eh_tur(struct scsi_cmnd *scmd) { static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0}; - int retry_cnt = 1, rtn; + int retry_cnt = 1; + enum scsi_disposition rtn; retry_tur: rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, @@ -1302,7 +1359,7 @@ retry_tur: case NEEDS_RETRY: if (retry_cnt--) goto retry_tur; - /*FALLTHRU*/ + fallthrough; case SUCCESS: return 0; default: @@ -1377,10 +1434,12 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd) static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0}; if (scmd->device->allow_restart) { - int i, rtn = NEEDS_RETRY; + int i; + enum scsi_disposition rtn = NEEDS_RETRY; for (i = 0; rtn == NEEDS_RETRY && i < 2; i++) - rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0); + rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, + scmd->device->eh_timeout, 0); if (rtn == SUCCESS) return 0; @@ -1412,6 +1471,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost, sdev_printk(KERN_INFO, sdev, "%s: skip START_UNIT, past eh deadline\n", current->comm)); + scsi_device_put(sdev); break; } stu_scmd = NULL; @@ -1470,7 +1530,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, { struct scsi_cmnd *scmd, *bdr_scmd, *next; struct scsi_device *sdev; - int rtn; + enum scsi_disposition rtn; shost_for_each_device(sdev, shost) { if (scsi_host_eh_past_deadline(shost)) { @@ -1478,6 +1538,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost, sdev_printk(KERN_INFO, sdev, "%s: skip BDR, past eh deadline\n", current->comm)); + scsi_device_put(sdev); break; } bdr_scmd = NULL; @@ -1536,7 +1597,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, while (!list_empty(&tmp_list)) { struct scsi_cmnd *next, *scmd; - int rtn; + enum scsi_disposition rtn; unsigned int id; if (scsi_host_eh_past_deadline(shost)) { @@ -1594,7 +1655,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost, struct scsi_cmnd *scmd, *chan_scmd, *next; LIST_HEAD(check_list); unsigned int channel; - int rtn; + enum scsi_disposition rtn; /* * we really want to loop over the various channels, and do this on @@ -1665,7 +1726,7 @@ static int scsi_eh_host_reset(struct Scsi_Host *shost, { struct scsi_cmnd *scmd, *next; LIST_HEAD(check_list); - int rtn; + enum scsi_disposition rtn; if (!list_empty(work_q)) { scmd = list_entry(work_q->next, @@ -1722,39 +1783,39 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q, * scsi_noretry_cmd - determine if command should be failed fast * @scmd: SCSI cmd to examine. 
*/ -int scsi_noretry_cmd(struct scsi_cmnd *scmd) +bool scsi_noretry_cmd(struct scsi_cmnd *scmd) { + struct request *req = scsi_cmd_to_rq(scmd); + switch (host_byte(scmd->result)) { case DID_OK: break; case DID_TIME_OUT: goto check_type; case DID_BUS_BUSY: - return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT); + return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT); case DID_PARITY: - return (scmd->request->cmd_flags & REQ_FAILFAST_DEV); + return !!(req->cmd_flags & REQ_FAILFAST_DEV); case DID_ERROR: - if (msg_byte(scmd->result) == COMMAND_COMPLETE && - status_byte(scmd->result) == RESERVATION_CONFLICT) - return 0; - /* fall through */ + if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT) + return false; + fallthrough; case DID_SOFT_ERROR: - return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER); + return !!(req->cmd_flags & REQ_FAILFAST_DRIVER); } - if (status_byte(scmd->result) != CHECK_CONDITION) - return 0; + if (!scsi_status_is_check_condition(scmd->result)) + return false; check_type: /* * assume caller has checked sense and determined * the check condition was retryable. */ - if (scmd->request->cmd_flags & REQ_FAILFAST_DEV || - blk_rq_is_passthrough(scmd->request)) - return 1; - else - return 0; + if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req)) + return true; + + return false; } /** @@ -1771,9 +1832,9 @@ check_type: * doesn't require the error handler read (i.e. we don't need to * abort/reset), this function should return SUCCESS. */ -int scsi_decide_disposition(struct scsi_cmnd *scmd) +enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd) { - int rtn; + enum scsi_disposition rtn; /* * if the device is offline, then we clearly just pass the result back @@ -1808,7 +1869,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) set_host_byte(scmd, DID_TIME_OUT); return SUCCESS; } - /* FALLTHROUGH */ + fallthrough; case DID_NO_CONNECT: case DID_BAD_TARGET: /* @@ -1844,15 +1905,20 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) * the fast io fail tmo fired), so send IO directly upwards. */ return SUCCESS; + case DID_TRANSPORT_MARGINAL: + /* + * caller has decided not to do retries on + * abort success, so send IO directly upwards + */ + return SUCCESS; case DID_ERROR: - if (msg_byte(scmd->result) == COMMAND_COMPLETE && - status_byte(scmd->result) == RESERVATION_CONFLICT) + if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT) /* * execute reservation conflict processing code * lower down */ break; - /* fallthrough */ + fallthrough; case DID_BUS_BUSY: case DID_PARITY: goto maybe_retry; @@ -1875,23 +1941,17 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) } /* - * next, check the message byte. - */ - if (msg_byte(scmd->result) != COMMAND_COMPLETE) - return FAILED; - - /* * check the status byte to see if this indicates anything special. */ - switch (status_byte(scmd->result)) { - case QUEUE_FULL: + switch (get_status_byte(scmd)) { + case SAM_STAT_TASK_SET_FULL: scsi_handle_queue_full(scmd->device); /* * the case of trying to send too many commands to a * tagged queueing device. */ - /* FALLTHROUGH */ - case BUSY: + fallthrough; + case SAM_STAT_BUSY: /* * device can't talk to us at the moment. Should only * occur (SAM-3) when the task queue is empty, so will cause @@ -1899,16 +1959,16 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) * device. 
*/ return ADD_TO_MLQUEUE; - case GOOD: + case SAM_STAT_GOOD: if (scmd->cmnd[0] == REPORT_LUNS) scmd->device->sdev_target->expecting_lun_change = 0; scsi_handle_queue_ramp_up(scmd->device); - /* FALLTHROUGH */ - case COMMAND_TERMINATED: + fallthrough; + case SAM_STAT_COMMAND_TERMINATED: return SUCCESS; - case TASK_ABORTED: + case SAM_STAT_TASK_ABORTED: goto maybe_retry; - case CHECK_CONDITION: + case SAM_STAT_CHECK_CONDITION: rtn = scsi_check_sense(scmd); if (rtn == NEEDS_RETRY) goto maybe_retry; @@ -1917,22 +1977,20 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) * to collect the sense and redo the decide * disposition */ return rtn; - case CONDITION_GOOD: - case INTERMEDIATE_GOOD: - case INTERMEDIATE_C_GOOD: - case ACA_ACTIVE: + case SAM_STAT_CONDITION_MET: + case SAM_STAT_INTERMEDIATE: + case SAM_STAT_INTERMEDIATE_CONDITION_MET: + case SAM_STAT_ACA_ACTIVE: /* * who knows? FIXME(eric) */ return SUCCESS; - case RESERVATION_CONFLICT: + case SAM_STAT_RESERVATION_CONFLICT: sdev_printk(KERN_INFO, scmd->device, "reservation conflict\n"); - set_host_byte(scmd, DID_NEXUS_FAILURE); + set_scsi_ml_byte(scmd, SCSIML_STAT_RESV_CONFLICT); return SUCCESS; /* causes immediate i/o error */ - default: - return FAILED; } return FAILED; @@ -1942,8 +2000,7 @@ maybe_retry: * the request was not marked fast fail. Note that above, * even if the request is marked fast fail, we still requeue * for queue congestion conditions (QUEUE_FULL or BUSY) */ - if ((++scmd->retries) <= scmd->allowed - && !scsi_noretry_cmd(scmd)) { + if (scsi_cmd_retry_allowed(scmd) && !scsi_noretry_cmd(scmd)) { return NEEDS_RETRY; } else { /* @@ -1953,9 +2010,11 @@ maybe_retry: } } -static void eh_lock_door_done(struct request *req, blk_status_t status) +static enum rq_end_io_ret eh_lock_door_done(struct request *req, + blk_status_t status) { - blk_put_request(req); + blk_mq_free_request(req); + return RQ_END_IO_NONE; } /** @@ -1971,27 +2030,28 @@ static void eh_lock_door_done(struct request *req, blk_status_t status) */ static void scsi_eh_lock_door(struct scsi_device *sdev) { + struct scsi_cmnd *scmd; struct request *req; - struct scsi_request *rq; - req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0); + req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0); if (IS_ERR(req)) return; - rq = scsi_req(req); + scmd = blk_mq_rq_to_pdu(req); - rq->cmd[0] = ALLOW_MEDIUM_REMOVAL; - rq->cmd[1] = 0; - rq->cmd[2] = 0; - rq->cmd[3] = 0; - rq->cmd[4] = SCSI_REMOVAL_PREVENT; - rq->cmd[5] = 0; - rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); + scmd->cmnd[0] = ALLOW_MEDIUM_REMOVAL; + scmd->cmnd[1] = 0; + scmd->cmnd[2] = 0; + scmd->cmnd[3] = 0; + scmd->cmnd[4] = SCSI_REMOVAL_PREVENT; + scmd->cmnd[5] = 0; + scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); + scmd->allowed = 5; req->rq_flags |= RQF_QUIET; req->timeout = 10 * HZ; - rq->retries = 5; + req->end_io = eh_lock_door_done; - blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done); + blk_execute_rq_nowait(req, true); } /** @@ -2089,8 +2149,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q) list_for_each_entry_safe(scmd, next, done_q, eh_entry) { list_del_init(&scmd->eh_entry); if (scsi_device_online(scmd->device) && - !scsi_noretry_cmd(scmd) && - (++scmd->retries <= scmd->allowed)) { + !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) && + scsi_eh_should_retry_cmd(scmd)) { SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, "%s: flush retry cmd\n", @@ -2100,10 +2160,10 @@ void scsi_eh_flush_done_q(struct list_head *done_q) /* * If just we got sense for the device 
(called * scsi_eh_get_sense), scmd->result is already - * set, do not set DRIVER_TIMEOUT. + * set, do not set DID_TIME_OUT. */ if (!scmd->result) - scmd->result |= (DRIVER_TIMEOUT << 24); + scmd->result |= (DID_TIME_OUT << 16); SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, "%s: flush finish cmd\n", @@ -2314,11 +2374,6 @@ void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target) } EXPORT_SYMBOL(scsi_report_device_reset); -static void -scsi_reset_provider_done_command(struct scsi_cmnd *scmd) -{ -} - /** * scsi_ioctl_reset: explicitly reset a host/bus/target/device * @dev: scsi_device to operate on @@ -2331,7 +2386,8 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) struct Scsi_Host *shost = dev->host; struct request *rq; unsigned long flags; - int error = 0, rtn, val; + int error = 0, val; + enum scsi_disposition rtn; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; @@ -2352,10 +2408,8 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) scmd = (struct scsi_cmnd *)(rq + 1); scsi_init_command(dev, scmd); - scmd->request = rq; - scmd->cmnd = scsi_req(rq)->cmd; - scmd->scsi_done = scsi_reset_provider_done_command; + scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL; memset(&scmd->sdb, 0, sizeof(scmd->sdb)); scmd->cmd_len = 0; @@ -2374,22 +2428,22 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) rtn = scsi_try_bus_device_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_TARGET: rtn = scsi_try_target_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_BUS: rtn = scsi_try_bus_reset(scmd); if (rtn == SUCCESS || (val & SG_SCSI_RESET_NO_ESCALATE)) break; - /* FALLTHROUGH */ + fallthrough; case SG_SCSI_RESET_HOST: rtn = scsi_try_host_reset(scmd); if (rtn == SUCCESS) break; - /* FALLTHROUGH */ + fallthrough; default: rtn = FAILED; break; @@ -2412,7 +2466,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) wake_up(&shost->host_wait); scsi_run_host_queues(shost); - scsi_put_command(scmd); kfree(rq); out_put_autopm_host: |
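
Note on the retry-handling hunks above: the open-coded "++scmd->retries <= scmd->allowed" checks are replaced by scsi_cmd_retry_allowed(), which treats SCSI_CMD_RETRIES_NO_LIMIT as an unbounded budget, and the retry decision is additionally gated by the new optional host hook eh_should_retry_cmd(). What follows is a minimal, userspace-only sketch of that gating logic, not kernel code: struct cmd_model and the helper names are hypothetical stand-ins for struct scsi_cmnd and the kernel helpers, and SCSI_CMD_RETRIES_NO_LIMIT is assumed to be -1 as in include/scsi/scsi_cmnd.h.

/*
 * Standalone model of the retry gating introduced by this diff: a command
 * is re-queued only if the core does not veto retries (scsi_noretry_cmd),
 * the retry budget allows it (scsi_cmd_retry_allowed, where
 * SCSI_CMD_RETRIES_NO_LIMIT means the budget is never exhausted), and the
 * host's eh_should_retry_cmd hook, if present, agrees.
 */
#include <stdbool.h>
#include <stdio.h>

#define SCSI_CMD_RETRIES_NO_LIMIT (-1)	/* value assumed from scsi_cmnd.h */

struct cmd_model {
	int retries;				/* retries consumed so far */
	int allowed;				/* retry budget, or NO_LIMIT */
	bool failfast;				/* stands in for scsi_noretry_cmd() */
	bool (*eh_should_retry_cmd)(struct cmd_model *);	/* optional host hook */
};

static bool cmd_retry_allowed(struct cmd_model *cmd)
{
	if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
		return true;
	return ++cmd->retries <= cmd->allowed;
}

static bool eh_should_retry(struct cmd_model *cmd)
{
	if (cmd->eh_should_retry_cmd)
		return cmd->eh_should_retry_cmd(cmd);
	return true;	/* default: no host-specific veto */
}

/* Mirrors the test used in scmd_eh_abort_handler() and scsi_eh_flush_done_q(). */
static bool should_requeue(struct cmd_model *cmd)
{
	return !cmd->failfast && cmd_retry_allowed(cmd) && eh_should_retry(cmd);
}

int main(void)
{
	struct cmd_model c = { .retries = 0, .allowed = 3, .failfast = false };
	int i;

	for (i = 0; i < 5; i++)
		printf("attempt %d: %s\n", i + 1,
		       should_requeue(&c) ? "retry" : "finish");
	return 0;
}

As in scmd_eh_abort_handler() and scsi_eh_flush_done_q(), all three checks must pass before a command is re-queued with SCSI_MLQUEUE_EH_RETRY; otherwise it is finished and its result is completed upward.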