path: root/drivers/nvme
author    James Smart <jsmart2021@gmail.com>    2018-02-06 06:48:30 -0800
committer Sagi Grimberg <sagi@grimberg.me>      2018-02-11 10:45:43 +0200
commit    c3aedd225f8bcc3b3e61df074bc045b80542b38a (patch)
tree      b5cb438d348ed5083c4fc2f0919519400939e88f /drivers/nvme
parent    nvme_fc: correct abort race condition on resets (diff)
nvme_fc: cleanup io completion
There was some old code that dealt with complete_rq being called prior to the lldd returning the io completion. This is garbage code. The complete_rq routine was being called after eh_timeouts were invoked, and that was due to the eh_timeouts not being handled properly. The timeouts were fixed in prior patches so that, in general, a timeout will initiate an abort and restart the reset timer, and the abort operation will take care of completing things. With the reset timer restarted, the erroneous complete_rq calls were eliminated. So remove the work that was synchronizing complete_rq with io completion.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
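For reference, the synchronization being deleted worked roughly as in the sketch below: the io-completion path (fcpio_done) and the block-layer complete_rq path could arrive in either order, each set a flag under the lock, and whichever ran second saw the other side's flag and took responsibility for the final cleanup. This is a minimal standalone illustration, not the kernel code; the demo_* names and the pthread mutex standing in for ctrl->lock are inventions for the sketch.

/* Sketch of the removed two-flag handshake (hypothetical names). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAGS_RELEASED (1 << 1)  /* complete_rq ran before io completion */
#define FLAGS_COMPLETE (1 << 2)  /* io completion ran before complete_rq */

struct demo_op {
        pthread_mutex_t lock;    /* stands in for ctrl->lock */
        unsigned int flags;
};

/* io-completion side: returns true if complete_rq already ran, i.e.
 * the final cleanup is now this path's responsibility. */
static bool demo_io_done(struct demo_op *op)
{
        bool do_final_cleanup;

        pthread_mutex_lock(&op->lock);
        do_final_cleanup = op->flags & FLAGS_RELEASED;
        if (!do_final_cleanup)
                op->flags |= FLAGS_COMPLETE;
        pthread_mutex_unlock(&op->lock);
        return do_final_cleanup;
}

/* complete_rq side: the mirror image of the above. */
static bool demo_complete_rq(struct demo_op *op)
{
        bool do_final_cleanup;

        pthread_mutex_lock(&op->lock);
        do_final_cleanup = op->flags & FLAGS_COMPLETE;
        if (!do_final_cleanup)
                op->flags |= FLAGS_RELEASED;
        pthread_mutex_unlock(&op->lock);
        return do_final_cleanup;
}

int main(void)
{
        struct demo_op op = { PTHREAD_MUTEX_INITIALIZER, 0 };

        /* If complete_rq arrives first, io completion must finish the op. */
        printf("complete_rq does final cleanup: %d\n", demo_complete_rq(&op));
        printf("io done does final cleanup:     %d\n", demo_io_done(&op));
        return 0;
}

Once a timeout reliably turns into an abort and a restarted reset timer, complete_rq can no longer arrive while the LLDD still owns the io, so the "who runs last" question disappears and both flags go with it.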
Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/fc.c   63
1 file changed, 12 insertions(+), 51 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 4673882ce152..7f51f8414b97 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -55,9 +55,7 @@ struct nvme_fc_queue {
enum nvme_fcop_flags {
        FCOP_FLAGS_TERMIO       = (1 << 0),
-       FCOP_FLAGS_RELEASED     = (1 << 1),
-       FCOP_FLAGS_COMPLETE     = (1 << 2),
-       FCOP_FLAGS_AEN          = (1 << 3),
+       FCOP_FLAGS_AEN          = (1 << 1),
};
struct nvmefc_ls_req_op {
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
/* *********************** NVME Ctrl Routines **************************** */
-static void __nvme_fc_final_op_cleanup(struct request *rq);
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static int
@@ -1544,25 +1541,20 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
                __nvme_fc_abort_op(ctrl, aen_op);
}
-static inline int
+static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
                struct nvme_fc_fcp_op *op, int opstate)
{
        unsigned long flags;
-       bool complete_rq = false;
-       spin_lock_irqsave(&ctrl->lock, flags);
-       if (opstate == FCPOP_STATE_ABORTED && ctrl->flags & FCCTRL_TERMIO) {
-               if (!--ctrl->iocnt)
-                       wake_up(&ctrl->ioabort_wait);
+       if (opstate == FCPOP_STATE_ABORTED) {
+               spin_lock_irqsave(&ctrl->lock, flags);
+               if (ctrl->flags & FCCTRL_TERMIO) {
+                       if (!--ctrl->iocnt)
+                               wake_up(&ctrl->ioabort_wait);
+               }
+               spin_unlock_irqrestore(&ctrl->lock, flags);
        }
-       if (op->flags & FCOP_FLAGS_RELEASED)
-               complete_rq = true;
-       else
-               op->flags |= FCOP_FLAGS_COMPLETE;
-       spin_unlock_irqrestore(&ctrl->lock, flags);
-
-       return complete_rq;
}
static void
@@ -1704,10 +1696,8 @@ done:
                        ctrl->ctrl.state == NVME_CTRL_CONNECTING))
                status |= cpu_to_le16(NVME_SC_DNR << 1);
-       if (__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate))
-               __nvme_fc_final_op_cleanup(rq);
-       else
-               nvme_end_request(rq, status, result);
+       __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+       nvme_end_request(rq, status, result);
check_error:
        if (terminate_assoc)
@@ -2394,45 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
}
static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
{
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_ctrl *ctrl = op->ctrl;
        atomic_set(&op->state, FCPOP_STATE_IDLE);
-       op->flags &= ~(FCOP_FLAGS_RELEASED | FCOP_FLAGS_COMPLETE);
        nvme_fc_unmap_data(ctrl, rq, op);
        nvme_complete_rq(rq);
        nvme_fc_ctrl_put(ctrl);
-
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-       struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-       struct nvme_fc_ctrl *ctrl = op->ctrl;
-       unsigned long flags;
-       bool completed = false;
-
-       /*
-        * the core layer, on controller resets after calling
-        * nvme_shutdown_ctrl(), calls complete_rq without our
-        * calling blk_mq_complete_request(), thus there may still
-        * be live i/o outstanding with the LLDD. Means transport has
-        * to track complete calls vs fcpio_done calls to know what
-        * path to take on completes and dones.
-        */
-       spin_lock_irqsave(&ctrl->lock, flags);
-       if (op->flags & FCOP_FLAGS_COMPLETE)
-               completed = true;
-       else
-               op->flags |= FCOP_FLAGS_RELEASED;
-       spin_unlock_irqrestore(&ctrl->lock, flags);
-
-       if (completed)
-               __nvme_fc_final_op_cleanup(rq);
}
/*
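Taken together, the diff leaves a single, linear completion path: fcpio_done always ends the request after the abort bookkeeping, and complete_rq is reduced to plain cleanup. The toy sketch below restates that flow in standalone form; the demo_* names and printf stand-ins are mine, not the driver's symbols.

/* Simplified model of the completion flow after this patch: one linear
 * path, no RELEASED/COMPLETE bookkeeping (names are stand-ins). */
#include <stdio.h>

enum demo_opstate { DEMO_ACTIVE, DEMO_ABORTED, DEMO_COMPLETE, DEMO_IDLE };

/* role of __nvme_fc_fcpop_chk_teardowns(): bookkeeping for aborted io only */
static void demo_chk_teardowns(enum demo_opstate opstate, int *iocnt)
{
        if (opstate == DEMO_ABORTED && --(*iocnt) == 0)
                printf("last aborted io done, wake reset/delete waiter\n");
}

/* role of nvme_fc_fcpio_done(): always ends the request, unconditionally */
static void demo_fcpio_done(enum demo_opstate opstate, int *iocnt)
{
        demo_chk_teardowns(opstate, iocnt);
        printf("nvme_end_request()\n");  /* single completion path */
}

/* role of nvme_fc_complete_rq(): plain cleanup, no racing with fcpio_done */
static void demo_complete_rq(void)
{
        printf("unmap data, nvme_complete_rq(), drop ctrl reference\n");
}

int main(void)
{
        int iocnt = 1;

        demo_fcpio_done(DEMO_ABORTED, &iocnt);
        demo_complete_rq();
        return 0;
}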