Diffstat (limited to 'drivers/scsi/qla2xxx/qla_nvme.c')
 drivers/scsi/qla2xxx/qla_nvme.c | 236
 1 file changed, 141 insertions(+), 95 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 22e3fba28e51..963094b3c300 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -12,8 +12,6 @@
static struct nvme_fc_port_template qla_nvme_fc_transport;
-static void qla_nvme_unregister_remote_port(struct work_struct *);
-
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
struct qla_nvme_rport *rport;
@@ -38,7 +36,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
(fcport->nvme_flag & NVME_FLAG_REGISTERED))
return 0;
- INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
memset(&req, 0, sizeof(struct nvme_fc_port_info));
@@ -74,7 +71,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
rport = fcport->nvme_remote_port->private;
rport->fcport = fcport;
- list_add_tail(&rport->list, &vha->nvme_rport_list);
fcport->nvme_flag |= NVME_FLAG_REGISTERED;
return 0;
@@ -124,53 +120,91 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
return 0;
}
+static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvmefc_fcp_req *fd;
+ struct srb_iocb *nvme;
+ unsigned long flags;
+
+ if (!priv)
+ goto out;
+
+ nvme = &sp->u.iocb_cmd;
+ fd = nvme->u.nvme.desc;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ priv->sp = NULL;
+ sp->priv = NULL;
+ if (priv->comp_status == QLA_SUCCESS) {
+ fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ } else {
+ fd->rcv_rsplen = 0;
+ fd->transferred_length = 0;
+ }
+ fd->status = 0;
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+ fd->done(fd);
+out:
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvmefc_ls_req *fd;
+ unsigned long flags;
+
+ if (!priv)
+ goto out;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ priv->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+
+ fd = priv->fd;
+ fd->done(fd, priv->comp_status);
+out:
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+ struct nvme_private *priv =
+ container_of(work, struct nvme_private, ls_work);
+
+ kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
+}
+
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *nvme;
- struct nvmefc_ls_req *fd;
struct nvme_private *priv;
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
return;
- atomic_dec(&sp->ref_count);
-
if (res)
res = -EINVAL;
- nvme = &sp->u.iocb_cmd;
- fd = nvme->u.nvme.desc;
- priv = fd->private;
+ priv = (struct nvme_private *)sp->priv;
priv->comp_status = res;
+ INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
schedule_work(&priv->ls_work);
- /* work schedule doesn't need the sp */
- qla2x00_rel_sp(sp);
}
+/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *nvme;
- struct nvmefc_fcp_req *fd;
-
- nvme = &sp->u.iocb_cmd;
- fd = nvme->u.nvme.desc;
-
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
+ struct nvme_private *priv = (struct nvme_private *)sp->priv;
- atomic_dec(&sp->ref_count);
-
- if (res == QLA_SUCCESS) {
- fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
- } else {
- fd->rcv_rsplen = 0;
- fd->transferred_length = 0;
- }
- fd->status = 0;
- fd->done(fd);
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
+ priv->comp_status = res;
+ kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
return;
}
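
The hunk above replaces the driver's open-coded atomic ref_count with a kref whose release callback is the single place that completes the upper-layer request and frees the SRB; the completion handlers now only record status and drop a reference. Below is a minimal, hypothetical userspace sketch of that pattern using C11 atomics. struct cmd, cmd_put() and the other names are illustrative stand-ins, not the driver's code or the kernel kref API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	atomic_int refcount;            /* stands in for struct kref        */
	int comp_status;                /* recorded by the completion path  */
	void (*done)(struct cmd *);     /* upper-layer callback (fd->done)  */
};

/* Runs exactly once, after the final reference is dropped. */
static void cmd_release(struct cmd *c)
{
	c->done(c);
	free(c);
}

/* kref_put()-style drop: release when the count reaches zero. */
static void cmd_put(struct cmd *c)
{
	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		cmd_release(c);
}

static void upper_layer_done(struct cmd *c)
{
	printf("command finished, status %d\n", c->comp_status);
}

int main(void)
{
	struct cmd *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->refcount, 1);   /* kref_init()                       */
	c->done = upper_layer_done;

	c->comp_status = 0;             /* completion path: record status... */
	cmd_put(c);                     /* ...then drop the reference        */
	return 0;
}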
@@ -189,44 +223,50 @@ static void qla_nvme_abort_work(struct work_struct *work)
__func__, sp, sp->handle, fcport, fcport->deleted);
if (!ha->flags.fw_started && (fcport && fcport->deleted))
- return;
+ goto out;
if (ha->flags.host_shutting_down) {
ql_log(ql_log_info, sp->fcport->vha, 0xffff,
"%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n",
__func__, sp, sp->type, atomic_read(&sp->ref_count));
sp->done(sp, 0);
- return;
+ goto out;
}
- if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0))
- return;
-
rval = ha->isp_ops->abort_command(sp);
ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
"%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
__func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
sp, sp->handle, fcport, rval);
+
+out:
+ /* kref_get was done before the work was scheduled. */
+ kref_put(&sp->cmd_kref, sp->put_fn);
}
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
struct nvme_private *priv = fd->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ if (!priv->sp) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+
+ if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
schedule_work(&priv->abort_work);
}
-static void qla_nvme_ls_complete(struct work_struct *work)
-{
- struct nvme_private *priv =
- container_of(work, struct nvme_private, ls_work);
- struct nvmefc_ls_req *fd = priv->fd;
-
- fd->done(fd, priv->comp_status);
-}
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
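
The abort paths above now queue qla_nvme_abort_work() only after kref_get_unless_zero() succeeds under priv->cmd_lock, so the worker can never race with a command whose last reference has already been dropped. A hedged userspace sketch of that guard follows; cmd_get_unless_zero() and try_schedule_abort() are invented names that merely mimic the kref_get_unless_zero()/kref_put() pairing.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cmd {
	atomic_int refcount;
};

/* Mimics kref_get_unless_zero(): take a reference only if one remains. */
static bool cmd_get_unless_zero(struct cmd *c)
{
	int old = atomic_load(&c->refcount);

	while (old != 0) {
		/* On failure, 'old' is reloaded and the loop retries. */
		if (atomic_compare_exchange_weak(&c->refcount, &old, old + 1))
			return true;
	}
	return false;
}

/* Queue abort work only while the command is still alive. */
static bool try_schedule_abort(struct cmd *c)
{
	if (!cmd_get_unless_zero(c))
		return false;   /* command already completed; nothing to abort */
	/* ...queue the work here; the worker drops the reference when done... */
	return true;
}

int main(void)
{
	struct cmd live, gone;

	atomic_init(&live.refcount, 1);
	atomic_init(&gone.refcount, 0);
	printf("live: %d, gone: %d\n",
	       (int)try_schedule_abort(&live), (int)try_schedule_abort(&gone));
	return 0;
}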
@@ -240,8 +280,16 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
srb_t *sp;
+
+ if (!fcport || (fcport && fcport->deleted))
+ return rval;
+
vha = fcport->vha;
ha = vha->hw;
+
+ if (!ha->flags.fw_started)
+ return rval;
+
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
@@ -250,11 +298,13 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
sp->type = SRB_NVME_LS;
sp->name = "nvme_ls";
sp->done = qla_nvme_sp_ls_done;
- atomic_set(&sp->ref_count, 1);
- nvme = &sp->u.iocb_cmd;
+ sp->put_fn = qla_nvme_release_ls_cmd_kref;
+ sp->priv = (void *)priv;
priv->sp = sp;
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
priv->fd = fd;
- INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
nvme->u.nvme.desc = fd;
nvme->u.nvme.dir = 0;
nvme->u.nvme.dl = 0;
@@ -271,8 +321,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- atomic_dec(&sp->ref_count);
wake_up(&sp->nvme_ls_waitq);
+ sp->priv = NULL;
+ priv->sp = NULL;
+ qla2x00_rel_sp(sp);
return rval;
}
@@ -284,6 +336,18 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
struct nvmefc_fcp_req *fd)
{
struct nvme_private *priv = fd->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->cmd_lock, flags);
+ if (!priv->sp) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&priv->cmd_lock, flags);
INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
schedule_work(&priv->abort_work);
@@ -487,11 +551,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fcport = qla_rport->fcport;
- vha = fcport->vha;
-
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
+ if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
+ (fcport && fcport->deleted))
return rval;
+ vha = fcport->vha;
/*
* If we know the dev is going away while the transport is still sending
* IO's return busy back to stall the IO Q. This happens when the
@@ -507,12 +571,15 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (!sp)
return -EBUSY;
- atomic_set(&sp->ref_count, 1);
init_waitqueue_head(&sp->nvme_ls_waitq);
+ kref_init(&sp->cmd_kref);
+ spin_lock_init(&priv->cmd_lock);
+ sp->priv = (void *)priv;
priv->sp = sp;
sp->type = SRB_NVME_CMD;
sp->name = "nvme_cmd";
sp->done = qla_nvme_sp_done;
+ sp->put_fn = qla_nvme_release_fcp_cmd_kref;
sp->qpair = qpair;
sp->vha = vha;
nvme = &sp->u.iocb_cmd;
@@ -522,8 +589,10 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
- atomic_dec(&sp->ref_count);
wake_up(&sp->nvme_ls_waitq);
+ sp->priv = NULL;
+ priv->sp = NULL;
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
return rval;
@@ -542,29 +611,16 @@ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
fc_port_t *fcport;
- struct qla_nvme_rport *qla_rport = rport->private, *trport;
+ struct qla_nvme_rport *qla_rport = rport->private;
fcport = qla_rport->fcport;
fcport->nvme_remote_port = NULL;
fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
-
- list_for_each_entry_safe(qla_rport, trport,
- &fcport->vha->nvme_rport_list, list) {
- if (qla_rport->fcport == fcport) {
- list_del(&qla_rport->list);
- break;
- }
- }
- complete(&fcport->nvme_del_done);
-
- if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
- INIT_WORK(&fcport->free_work, qlt_free_session_done);
- schedule_work(&fcport->free_work);
- }
-
fcport->nvme_flag &= ~NVME_FLAG_DELETING;
ql_log(ql_log_info, fcport->vha, 0x2110,
- "remoteport_delete of %p completed.\n", fcport);
+ "remoteport_delete of %p %8phN completed.\n",
+ fcport, fcport->port_name);
+ complete(&fcport->nvme_del_done);
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
@@ -586,35 +642,25 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.fcprqst_priv_sz = sizeof(struct nvme_private),
};
-static void qla_nvme_unregister_remote_port(struct work_struct *work)
+void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
- struct fc_port *fcport = container_of(work, struct fc_port,
- nvme_del_work);
- struct qla_nvme_rport *qla_rport, *trport;
+ int ret;
if (!IS_ENABLED(CONFIG_NVME_FC))
return;
ql_log(ql_log_warn, NULL, 0x2112,
- "%s: unregister remoteport on %p\n",__func__, fcport);
-
- list_for_each_entry_safe(qla_rport, trport,
- &fcport->vha->nvme_rport_list, list) {
- if (qla_rport->fcport == fcport) {
- ql_log(ql_log_info, fcport->vha, 0x2113,
- "%s: fcport=%p\n", __func__, fcport);
- nvme_fc_set_remoteport_devloss
- (fcport->nvme_remote_port, 0);
- init_completion(&fcport->nvme_del_done);
- if (nvme_fc_unregister_remoteport
- (fcport->nvme_remote_port))
- ql_log(ql_log_info, fcport->vha, 0x2114,
- "%s: Failed to unregister nvme_remote_port\n",
- __func__);
- wait_for_completion(&fcport->nvme_del_done);
- break;
- }
- }
+ "%s: unregister remoteport on %p %8phN\n",
+ __func__, fcport, fcport->port_name);
+
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+ init_completion(&fcport->nvme_del_done);
+ ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+ if (ret)
+ ql_log(ql_log_info, fcport->vha, 0x2114,
+ "%s: Failed to unregister nvme_remote_port (%d)\n",
+ __func__, ret);
+ wait_for_completion(&fcport->nvme_del_done);
}
void qla_nvme_delete(struct scsi_qla_host *vha)
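
qla_nvme_unregister_remote_port() keeps the existing completion handshake: initialise fcport->nvme_del_done, start the asynchronous unregister, and block until qla_nvme_remoteport_delete() signals it. The sketch below re-creates that handshake in plain userspace C with pthreads; struct completion and the helper names only imitate the kernel primitives of the same name and are not the driver's code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion del_done;

/* Stands in for qla_nvme_remoteport_delete(), invoked asynchronously. */
static void *remoteport_delete(void *arg)
{
	(void)arg;
	puts("remoteport delete callback ran");
	complete(&del_done);
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&del_done);
	pthread_create(&t, NULL, remoteport_delete, NULL); /* the "unregister" */
	wait_for_completion(&del_done);  /* block until the callback signals  */
	pthread_join(&t, NULL);
	return 0;
}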