Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h         |  12
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c    |   7
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c     |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h    |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c     | 439
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 437
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h      |  41
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h     | 167
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c    | 102
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c    |  28
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c    |   9
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c     | 458
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h    |   9
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h |   2
14 files changed, 1222 insertions(+), 497 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a50aa03b8ac1..196de40b906c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -202,9 +202,12 @@ struct lpfc_stats {
uint32_t elsRcvPRLO;
uint32_t elsRcvPRLI;
uint32_t elsRcvLIRR;
+ uint32_t elsRcvRLS;
uint32_t elsRcvRPS;
uint32_t elsRcvRPL;
uint32_t elsRcvRRQ;
+ uint32_t elsRcvRTV;
+ uint32_t elsRcvECHO;
uint32_t elsXmitFLOGI;
uint32_t elsXmitFDISC;
uint32_t elsXmitPLOGI;
@@ -549,9 +552,11 @@ struct lpfc_hba {
#define ELS_XRI_ABORT_EVENT 0x40
#define ASYNC_EVENT 0x80
#define LINK_DISABLED 0x100 /* Link disabled by user */
-#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */
-#define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */
-#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */
+#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */
+#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */
+#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */
+#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
+#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
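The patch splits the old FCF_DISC_INPROGRESS bit into FCF_TS_INPROG (table scan) and FCF_RR_INPROG (roundrobin FLOGI failover), so the devloss logic can tell which phase the FIP engine is in. A minimal sketch of how these bits are manipulated under hbalock, mirroring the pattern used throughout this patch (names as defined above):

    /* Sketch: clear both discovery phases atomically w.r.t. hbalock. */
    spin_lock_irq(&phba->hbalock);
    phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
    spin_unlock_irq(&phba->hbalock);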
@@ -573,6 +578,7 @@ struct lpfc_hba {
/* These fields used to be binfo */
uint32_t fc_pref_DID; /* preferred D_ID */
uint8_t fc_pref_ALPA; /* preferred AL_PA */
+ uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
uint32_t fc_edtov; /* E_D_TOV timer value */
uint32_t fc_arbtov; /* ARB_TOV timer value */
uint32_t fc_ratov; /* R_A_TOV timer value */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f681eea57730..c1cbec01345d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3789,8 +3789,13 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
break;
case MBX_SECURITY_MGMT:
case MBX_AUTH_PORT:
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+ printk(KERN_WARNING "mbox_read:Command 0x%x "
+ "is not permitted\n", pmb->mbxCommand);
+ sysfs_mbox_idle(phba);
+ spin_unlock_irq(&phba->hbalock);
return -EPERM;
+ }
break;
case MBX_READ_SPARM64:
case MBX_READ_LA:
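The added error path must undo the sysfs mailbox state and drop hbalock before returning; sysfs_mbox_read() is holding the lock at this point, so a bare return -EPERM would leave it locked. A condensed sketch of the pattern (assuming the sysfs_mbox_read() context shown above):

    if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
        printk(KERN_WARNING "mbox_read:Command 0x%x is not permitted\n",
               pmb->mbxCommand);
        sysfs_mbox_idle(phba);           /* release sysfs mbox state */
        spin_unlock_irq(&phba->hbalock); /* never return with lock held */
        return -EPERM;
    }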
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d60b55f53a..7260c3af555a 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3142,12 +3142,12 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
job = menlo->set_job;
job->dd_data = NULL; /* so timeout handler does not reply */
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock(&phba->hbalock);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
bmp = menlo->bmp;
rspiocbq = menlo->rspiocbq;
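lpfc_bsg_menlo_cmd_cmp() runs in the IOCB completion path where interrupts are already disabled, so the irqsave/irqrestore variants are redundant there; the plain lock form suffices. Sketch of the distinction (assumption: this completion is always entered with IRQs off, as the change implies):

    /* Process/IRQ-enabled context: must save and restore the IRQ state. */
    spin_lock_irqsave(&phba->hbalock, flags);
    /* ... */
    spin_unlock_irqrestore(&phba->hbalock, flags);

    /* Completion handler already running with IRQs disabled: */
    spin_lock(&phba->hbalock);
    /* ... */
    spin_unlock(&phba->hbalock);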
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 03f4ddc18572..a5f5a093a8a4 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -44,6 +44,8 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
struct lpfc_nodelist *);
@@ -229,6 +231,7 @@ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
@@ -271,6 +274,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e6ca12f6c6cb..884f4d321799 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -177,15 +177,18 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
(elscmd == ELS_CMD_LOGO)))
switch (elscmd) {
case ELS_CMD_FLOGI:
- elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
case ELS_CMD_FDISC:
- elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
case ELS_CMD_LOGO:
- elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+ elsiocb->iocb_flag |=
+ ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
& LPFC_FIP_ELS_ID_MASK);
break;
}
@@ -517,18 +520,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
+ phba->fc_edtovResol = sp->cmn.edtovResolution;
phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
if (phba->fc_topology == TOPOLOGY_LOOP) {
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PUBLIC_LOOP;
spin_unlock_irq(shost->host_lock);
- } else {
- /*
- * If we are a N-port connected to a Fabric, fixup sparam's so
- * logins to devices on remote loops work.
- */
- vport->fc_sparam.cmn.altBbCredit = 1;
}
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
@@ -585,6 +583,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
+
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
@@ -800,7 +802,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
/*
- * In case of FIP mode, perform round robin FCF failover
+ * In case of FIP mode, perform roundrobin FCF failover
* due to new FCF discovery
*/
if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
@@ -808,48 +810,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
- "2611 FLOGI failed on registered "
- "FCF record fcf_index(%d), status: "
- "x%x/x%x, tmo:x%x, trying to perform "
- "round robin failover\n",
+ "2611 FLOGI failed on FCF (x%x), "
+ "status:x%x/x%x, tmo:x%x, perform "
+ "roundrobin FCF failover\n",
phba->fcf.current_rec.fcf_indx,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
- if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
- /*
- * Exhausted the eligible FCF record list,
- * fail through to retry FLOGI on current
- * FCF record.
- */
- lpfc_printf_log(phba, KERN_WARNING,
- LOG_FIP | LOG_ELS,
- "2760 Completed one round "
- "of FLOGI FCF round robin "
- "failover list, retry FLOGI "
- "on currently registered "
- "FCF index:%d\n",
- phba->fcf.current_rec.fcf_indx);
- } else {
- lpfc_printf_log(phba, KERN_INFO,
- LOG_FIP | LOG_ELS,
- "2794 FLOGI FCF round robin "
- "failover to FCF index x%x\n",
- fcf_index);
- rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
- fcf_index);
- if (rc)
- lpfc_printf_log(phba, KERN_WARNING,
- LOG_FIP | LOG_ELS,
- "2761 FLOGI round "
- "robin FCF failover "
- "read FCF failed "
- "rc:x%x, fcf_index:"
- "%d\n", rc,
- phba->fcf.current_rec.fcf_indx);
- else
- goto out;
- }
+ rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+ if (rc)
+ goto out;
}
/* FLOGI failure */
@@ -939,6 +909,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_nlp_put(ndlp);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
goto out;
}
@@ -947,13 +918,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (phba->hba_flag & HBA_FIP_SUPPORT)
lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
LOG_ELS,
- "2769 FLOGI successful on FCF "
- "record: current_fcf_index:"
- "x%x, terminate FCF round "
- "robin failover process\n",
+ "2769 FLOGI to FCF (x%x) "
+ "completed successfully\n",
phba->fcf.current_rec.fcf_indx);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
spin_unlock_irq(&phba->hbalock);
goto out;
}
@@ -1175,12 +1145,13 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
return 0;
}
- if (lpfc_issue_els_flogi(vport, ndlp, 0))
+ if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
/* This decrement of reference count to node shall kick off
* the release of the node.
*/
lpfc_nlp_put(ndlp);
-
+ return 0;
+ }
return 1;
}
@@ -1645,6 +1616,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
sp = (struct serv_parm *) pcmd;
+ /*
+ * If we are an N_Port connected to a Fabric, fix up sparam's so logins
+ * to devices on remote loops work.
+ */
+ if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+ sp->cmn.altBbCredit = 1;
+
if (sp->cmn.fcphLow < FC_PH_4_3)
sp->cmn.fcphLow = FC_PH_4_3;
@@ -3926,6 +3904,64 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
}
/**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ * 0 - Successfully issued acc echo response
+ * 1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+ struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_sli *psli;
+ uint8_t *pcmd;
+ uint16_t cmdsize;
+ int rc;
+
+ psli = &phba->sli;
+ cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+ elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+ if (!elsiocb)
+ return 1;
+
+ elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
+ /* Xmit ECHO ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext);
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t);
+ memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+ "Issue ACC ECHO: did:x%x flg:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+ phba->fc_stat.elsXmitACC++;
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ lpfc_nlp_put(ndlp);
+ elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
+ * it could be freed */
+
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+ if (rc == IOCB_ERROR) {
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 1;
+ }
+ return 0;
+}
+
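lpfc_els_rsp_echo_acc() answers an ECHO by writing the ACC command word and copying the received echo data back unchanged; cmdsize comes from the unsolicited frame's acc_len, so the response is exactly as long as the request. Payload layout, restated as a sketch:

    *((uint32_t *) pcmd) = ELS_CMD_ACC;             /* word 0: ACC      */
    pcmd += sizeof(uint32_t);
    memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); /* echoed data back */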
+/**
* lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
* @vport: pointer to a host virtual N_Port data structure.
*
@@ -4684,6 +4720,30 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ * 0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ uint8_t *pcmd;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+ /* skip over first word of echo command to find echo data */
+ pcmd += sizeof(uint32_t);
+
+ lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+ return 0;
+}
+
+/**
* lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
* @vport: pointer to a host virtual N_Port data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
@@ -4735,6 +4795,89 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
}
/**
+ * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RLS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
+ * ACC response to the RLS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RLS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ IOCB_t *icmd;
+ struct RLS_RSP *rls_rsp;
+ uint8_t *pcmd;
+ struct lpfc_iocbq *elsiocb;
+ struct lpfc_nodelist *ndlp;
+ uint16_t xri;
+ uint32_t cmdsize;
+
+ mb = &pmb->u.mb;
+
+ ndlp = (struct lpfc_nodelist *) pmb->context2;
+ xri = (uint16_t) ((unsigned long)(pmb->context1));
+ pmb->context1 = NULL;
+ pmb->context2 = NULL;
+
+ if (mb->mbxStatus) {
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return;
+ }
+
+ cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+ mempool_free(pmb, phba->mbox_mem_pool);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ /* Decrement the ndlp reference count from previous mbox command */
+ lpfc_nlp_put(ndlp);
+
+ if (!elsiocb)
+ return;
+
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = xri;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t); /* Skip past command */
+ rls_rsp = (struct RLS_RSP *)pcmd;
+
+ rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+ rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+ rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+ rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+ rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+ rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+
+ /* Xmit ELS RLS ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+}
+
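The RLS handler (below) cannot answer immediately: the link statistics live in the HBA and must be fetched with MBX_READ_LNK_STAT first, so the ACC is sent from this mailbox completion. The unsolicited IOCB's XRI and a held ndlp reference ride along on the mailbox. Sketch of the hand-off as set up in lpfc_els_rcv_rls():

    /* Stash the command's XRI and a node reference on the mailbox; the
     * completion (lpfc_els_rsp_rls_acc) uses them to address the ACC.
     */
    mbox->context1 = (void *)((unsigned long) cmdiocb->iocb.ulpContext);
    mbox->context2 = lpfc_nlp_get(ndlp);
    mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;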
+/**
* lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
@@ -4827,7 +4970,155 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
/**
- * lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Link Status (RLS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * for reading the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
+ * actually sends out the RLS Accept (ACC) response.
+ *
+ * Return codes
+ * 0 - Successfully processed rls iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_dmabuf *pcmd;
+ struct ls_rjt stat;
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RLS request and done with it */
+ goto reject_out;
+
+ pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+ if (mbox) {
+ lpfc_read_lnk_stat(phba, mbox);
+ mbox->context1 =
+ (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+ mbox->context2 = lpfc_nlp_get(ndlp);
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+ != MBX_NOT_FINISHED)
+ /* Mbox completion will send ELS Response */
+ return 0;
+ /* Decrement reference count used for the failed mbox
+ * command.
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+reject_out:
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Timeout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
+ * Value (RTV) unsolicited IOCB event.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RTV Accept Response ELS IOCB command.
+ *
+ * Return codes
+ * 0 - Successfully processed rtv iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct ls_rjt stat;
+ struct RTV_RSP *rtv_rsp;
+ uint8_t *pcmd;
+ struct lpfc_iocbq *elsiocb;
+ uint32_t cmdsize;
+
+
+ if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+ /* reject the unsolicited RTV request and done with it */
+ goto reject_out;
+
+ cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+ elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_ACC);
+
+ if (!elsiocb)
+ return 1;
+
+ pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ pcmd += sizeof(uint32_t); /* Skip past command */
+
+ /* use the command's xri in the response */
+ elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
+
+ rtv_rsp = (struct RTV_RSP *)pcmd;
+
+ /* populate RTV payload */
+ rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+ rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+ bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+ bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
+ rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+ /* Xmit ELS RTV ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+ "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+ "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+ "Data: x%x x%x x%x\n",
+ elsiocb->iotag, elsiocb->iocb.ulpContext,
+ ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_rpi,
+ rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+ elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ phba->fc_stat.elsXmitACC++;
+ if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+ lpfc_els_free_iocb(phba, elsiocb);
+ return 0;
+
+reject_out:
+ /* issue rejection response */
+ stat.un.b.lsRjtRsvd0 = 0;
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+ stat.un.b.vendorUnique = 0;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+ return 0;
+}
+
+/**
+ * lpfc_els_rcv_rps - Process an unsolicited rps iocb
* @vport: pointer to a host virtual N_Port data structure.
* @cmdiocb: pointer to lpfc command iocb data structure.
* @ndlp: pointer to a node-list data structure.
@@ -5017,7 +5308,6 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
lp = (uint32_t *) pcmd->virt;
rpl = (RPL *) (lp + 1);
-
maxsize = be32_to_cpu(rpl->maxsize);
/* We support only one port */
@@ -5836,6 +6126,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_RLS:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RLS: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvRLS++;
+ lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
case ELS_CMD_RPS:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RPS: did:x%x/ste:x%x flg:x%x",
@@ -5866,6 +6166,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_RTV:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV RTV: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+ phba->fc_stat.elsRcvRTV++;
+ lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
case ELS_CMD_RRQ:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV RRQ: did:x%x/ste:x%x flg:x%x",
@@ -5876,6 +6185,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_ECHO:
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+ "RCV ECHO: did:x%x/ste:x%x flg:x%x",
+ did, vport->port_state, ndlp->nlp_flag);
+
+ phba->fc_stat.elsRcvECHO++;
+ lpfc_els_rcv_echo(vport, elsiocb, ndlp);
+ if (newnode)
+ lpfc_nlp_put(ndlp);
+ break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -6170,6 +6489,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
default:
/* Try to recover from this error */
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6437,6 +6758,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_unreg_rpi(vport, np);
}
lpfc_cleanup_pending_mbox(vport);
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
+
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6452,7 +6777,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* to update the MAC address.
*/
lpfc_register_new_vport(phba, vport, ndlp);
- return ;
+ goto out;
}
if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a345dde16c86..a5d1695dac3d 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
*******************************************************************/
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
@@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = {
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int lpfc_fcf_inuse(struct lpfc_hba *);
void
lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
return;
}
-/*
- * This function is called from the worker thread when dev_loss_tmo
- * expire.
- */
-static void
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when the devloss timeout
+ * timer expires. For an SLI4 host, this routine returns 1 when at least one
+ * remote node, including this @ndlp, is still in use of the FCF; otherwise,
+ * it returns 0 when no remote node is still using the FCF at the time the
+ * devloss timeout happened to this @ndlp.
+ **/
+static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
struct lpfc_rport_data *rdata;
@@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
int put_node;
int put_rport;
int warn_on = 0;
+ int fcf_inuse = 0;
rport = ndlp->rport;
if (!rport)
- return;
+ return fcf_inuse;
rdata = rport->dd_data;
name = (uint8_t *) &ndlp->nlp_portname;
vport = ndlp->vport;
phba = vport->phba;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ fcf_inuse = lpfc_fcf_inuse(phba);
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
"rport devlosstmo:did:x%x type:x%x id:x%x",
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
@@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_nlp_put(ndlp);
if (put_rport)
put_device(&rport->dev);
- return;
+ return fcf_inuse;
}
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
@@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID);
- return;
+ return fcf_inuse;
}
if (ndlp->nlp_type & NLP_FABRIC) {
@@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_nlp_put(ndlp);
if (put_rport)
put_device(&rport->dev);
- return;
+ return fcf_inuse;
}
if (ndlp->nlp_sid != NLP_NO_SID) {
@@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
(ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+ return fcf_inuse;
+}
+
+/**
+ * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
+ * @phba: Pointer to hba context object.
+ * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
+ * @nlp_did: remote node identifier with devloss timeout.
+ *
+ * This function is called from the worker thread after invoking the devloss
+ * timeout handler and releasing the reference count for the ndlp with
+ * which the devloss timeout was handled for an SLI4 host. When this routine
+ * is invoked for the devloss timeout of the last remote node that had been
+ * using the FCF, it is guaranteed that no remote node is still using the
+ * FCF. In that case, if the FIP engine is neither in the FCF table scan
+ * process nor the roundrobin failover process, the in-use FCF is
+ * unregistered here. If the FIP engine is in the FCF discovery process,
+ * the devloss timeout state is set so that either the FCF table scan
+ * process or the roundrobin failover process unregisters the in-use FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+ uint32_t nlp_did)
+{
+ /* If devloss timeout happened to a remote node when FCF had no
+ * longer been in-use, do nothing.
+ */
+ if (!fcf_inuse)
+ return;
+
+ if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+ spin_lock_irq(&phba->hbalock);
+ if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
+ phba->hba_flag |= HBA_DEVLOSS_TMO;
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2847 Last remote node (x%x) using "
+ "FCF devloss tmo\n", nlp_did);
+ }
+ if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2868 Devloss tmo to FCF rediscovery "
+ "in progress\n");
+ return;
+ }
+ if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2869 Devloss tmo to idle FIP engine, "
+ "unreg in-use FCF and rescan.\n");
+ /* Unregister in-use FCF and rescan */
+ lpfc_unregister_fcf_rescan(phba);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ if (phba->hba_flag & FCF_TS_INPROG)
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2870 FCF table scan in progress\n");
+ if (phba->hba_flag & FCF_RR_INPROG)
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2871 FLOGI roundrobin FCF failover "
+ "in progress\n");
+ }
lpfc_unregister_unused_fcf(phba);
}
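The decision logic above reduces to a small predicate. A hedged sketch condensing it (hypothetical helper, hbalock handling omitted for brevity; the flags are the ones defined in this patch):

    static bool devloss_should_unreg_now(struct lpfc_hba *phba, int fcf_inuse)
    {
        if (!fcf_inuse)
            return false;  /* FCF already out of use at devloss time */
        if (!(phba->hba_flag & HBA_FIP_SUPPORT) || lpfc_fcf_inuse(phba))
            return false;  /* not FIP, or some node still uses the FCF */
        if (phba->fcf.fcf_flag & FCF_REDISC_PROG)
            return false;  /* rediscovery will unregister it anyway */
        /* unregister immediately only when the FIP engine is idle */
        return !(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG));
    }

When this would return false because a scan or failover is in flight, the handler instead sets HBA_DEVLOSS_TMO so that the in-progress process unregisters the in-use FCF later.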
@@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba)
struct lpfc_work_evt *evtp = NULL;
struct lpfc_nodelist *ndlp;
int free_evt;
+ int fcf_inuse;
+ uint32_t nlp_did;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
@@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba)
break;
case LPFC_EVT_DEV_LOSS:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
- lpfc_dev_loss_tmo_handler(ndlp);
+ fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
free_evt = 0;
/* decrement the node reference count held for
* this queued work
*/
+ nlp_did = ndlp->nlp_DID;
lpfc_nlp_put(ndlp);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_post_dev_loss_tmo_handler(phba,
+ fcf_inuse,
+ nlp_did);
break;
case LPFC_EVT_ONLINE:
if (phba->link_state < LPFC_LINK_DOWN)
@@ -707,6 +794,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
: NLP_EVT_DEVICE_RECOVERY);
}
if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vport);
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -1021,8 +1110,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
"2017 REG_FCFI mbxStatus error x%x "
"HBA state x%x\n",
mboxq->u.mb.mbxStatus, vport->port_state);
- mempool_free(mboxq, phba->mbox_mem_pool);
- return;
+ goto fail_out;
}
/* Start FCoE discovery by sending a FLOGI. */
@@ -1031,20 +1119,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REGISTERED;
spin_unlock_irq(&phba->hbalock);
+
/* If there is a pending FCoE event, restart FCF table scan. */
- if (lpfc_check_pending_fcoe_event(phba, 1)) {
- mempool_free(mboxq, phba->mbox_mem_pool);
- return;
- }
+ if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+ goto fail_out;
+
+ /* Mark successful completion of FCF table scan */
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
- spin_unlock_irq(&phba->hbalock);
- if (vport->port_state != LPFC_FLOGI)
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ if (vport->port_state != LPFC_FLOGI) {
+ phba->hba_flag |= FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
lpfc_initial_flogi(vport);
+ goto out;
+ }
+ spin_unlock_irq(&phba->hbalock);
+ goto out;
+fail_out:
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+out:
mempool_free(mboxq, phba->mbox_mem_pool);
- return;
}
/**
@@ -1241,10 +1339,9 @@ lpfc_register_fcf(struct lpfc_hba *phba)
int rc;
spin_lock_irq(&phba->hbalock);
-
/* If the FCF is not availabe do nothing. */
if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1252,19 +1349,22 @@ lpfc_register_fcf(struct lpfc_hba *phba)
/* The FCF is already registered, start discovery */
if (phba->fcf.fcf_flag & FCF_REGISTERED) {
phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
- spin_unlock_irq(&phba->hbalock);
- if (phba->pport->port_state != LPFC_FLOGI)
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ if (phba->pport->port_state != LPFC_FLOGI) {
+ phba->hba_flag |= FCF_RR_INPROG;
+ spin_unlock_irq(&phba->hbalock);
lpfc_initial_flogi(phba->pport);
+ return;
+ }
+ spin_unlock_irq(&phba->hbalock);
return;
}
spin_unlock_irq(&phba->hbalock);
- fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
+ fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!fcf_mbxq) {
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1275,7 +1375,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
spin_unlock_irq(&phba->hbalock);
mempool_free(fcf_mbxq, phba->mbox_mem_pool);
}
@@ -1493,7 +1593,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* FCF discovery, no need to restart FCF discovery.
*/
if ((phba->link_state >= LPFC_LINK_UP) &&
- (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+ (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
return 0;
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -1517,14 +1617,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
} else {
/*
- * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+ * Do not continue FCF discovery and clear FCF_TS_INPROG
* flag
*/
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2833 Stop FCF discovery process due to link "
"state change (x%x)\n", phba->link_state);
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
spin_unlock_irq(&phba->hbalock);
}
@@ -1729,6 +1829,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
}
/**
+ * lpfc_sli4_fcf_rr_next_proc - process the next roundrobin fcf
+ * @vport: Pointer to vport object.
+ * @fcf_index: index to next fcf.
+ *
+ * This function processes the roundrobin FCF failover to the next FCF index.
+ * When this function is invoked, there will be a current FCF registered
+ * for FLOGI.
+ * Return: 0 to continue retrying FLOGI on the currently registered FCF;
+ * 1 to stop FLOGI on the currently registered FCF.
+ */
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int rc;
+
+ if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2872 Devloss tmo with no eligible "
+ "FCF, unregister in-use FCF (x%x) "
+ "and rescan FCF table\n",
+ phba->fcf.current_rec.fcf_indx);
+ lpfc_unregister_fcf_rescan(phba);
+ goto stop_flogi_current_fcf;
+ }
+ /* Mark the end to FLOGI roundrobin failover */
+ phba->hba_flag &= ~FCF_RR_INPROG;
+ /* Allow action to new fcf asynchronous event */
+ phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2865 No FCF available, stop roundrobin FCF "
+ "failover and change port state:x%x/x%x\n",
+ phba->pport->port_state, LPFC_VPORT_UNKNOWN);
+ phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+ goto stop_flogi_current_fcf;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
+ "2794 Try FLOGI roundrobin FCF failover to "
+ "(x%x)\n", fcf_index);
+ rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
+ if (rc)
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+ "2761 FLOGI roundrobin FCF failover "
+ "failed (rc:x%x) to read FCF (x%x)\n",
+ rc, phba->fcf.current_rec.fcf_indx);
+ else
+ goto stop_flogi_current_fcf;
+ }
+ return 0;
+
+stop_flogi_current_fcf:
+ lpfc_can_disctmo(vport);
+ return 1;
+}
+
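The return convention of lpfc_sli4_fcf_rr_next_proc() drives the FLOGI retry decision in its callers. Usage sketch matching lpfc_cmpl_els_flogi() in this patch:

    fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
    rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
    if (rc)
        goto out;  /* 1: stop FLOGI on the currently registered FCF */
    /* 0: fall through to retry FLOGI on the current FCF */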
+/**
* lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
@@ -1756,7 +1915,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int rc;
/* If there is pending FCoE event restart FCF table scan */
- if (lpfc_check_pending_fcoe_event(phba, 0)) {
+ if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return;
}
@@ -1765,12 +1924,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
&next_fcf_index);
if (!new_fcf_record) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2765 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record.\n");
/* Let next new FCF event trigger fast failover */
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~FCF_TS_INPROG;
spin_unlock_irq(&phba->hbalock);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return;
@@ -1787,13 +1946,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/*
* If the fcf record does not match with connect list entries
* read the next entry; otherwise, this is an eligible FCF
- * record for round robin FCF failover.
+ * record for roundrobin FCF failover.
*/
if (!rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "2781 FCF record (x%x) failed FCF "
- "connection list check, fcf_avail:x%x, "
- "fcf_valid:x%x\n",
+ "2781 FCF (x%x) failed connection "
+ "list check: (x%x/x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
new_fcf_record),
bf_get(lpfc_fcf_record_fcf_avail,
@@ -1803,6 +1961,16 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
+ if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
+ phba->fcf.current_rec.fcf_indx) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2862 FCF (x%x) matches property "
+ "of in-use FCF (x%x)\n",
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record),
+ phba->fcf.current_rec.fcf_indx);
+ goto read_next_fcf;
+ }
/*
* In case the current in-use FCF record becomes
* invalid/unavailable during FCF discovery that
@@ -1813,9 +1981,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2835 Invalid in-use FCF "
- "record (x%x) reported, "
- "entering fast FCF failover "
- "mode scanning.\n",
+ "(x%x), enter FCF failover "
+ "table scan.\n",
phba->fcf.current_rec.fcf_indx);
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
@@ -1844,22 +2011,29 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (phba->fcf.fcf_flag & FCF_IN_USE) {
if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
new_fcf_record, vlan_id)) {
- phba->fcf.fcf_flag |= FCF_AVAILABLE;
- if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
- /* Stop FCF redisc wait timer if pending */
- __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
- else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
- /* If in fast failover, mark it's completed */
- phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
- spin_unlock_irq(&phba->hbalock);
- lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2836 The new FCF record (x%x) "
- "matches the in-use FCF record "
- "(x%x)\n",
- phba->fcf.current_rec.fcf_indx,
+ if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
+ phba->fcf.current_rec.fcf_indx) {
+ phba->fcf.fcf_flag |= FCF_AVAILABLE;
+ if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+ /* Stop FCF redisc wait timer */
+ __lpfc_sli4_stop_fcf_redisc_wait_timer(
+ phba);
+ else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+ /* Fast failover, mark completed */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+ spin_unlock_irq(&phba->hbalock);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2836 New FCF matches in-use "
+ "FCF (x%x)\n",
+ phba->fcf.current_rec.fcf_indx);
+ goto out;
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "2863 New FCF (x%x) matches "
+ "property of in-use FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
- new_fcf_record));
- goto out;
+ new_fcf_record),
+ phba->fcf.current_rec.fcf_indx);
}
/*
* Read next FCF record from HBA searching for the matching
@@ -1953,8 +2127,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
if (fcf_rec) {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2840 Update current FCF record "
- "with initial FCF record (x%x)\n",
+ "2840 Update initial FCF candidate "
+ "with FCF (x%x)\n",
bf_get(lpfc_fcf_record_fcf_index,
new_fcf_record));
__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
@@ -1984,20 +2158,28 @@ read_next_fcf:
*/
if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "2782 No suitable FCF record "
- "found during this round of "
- "post FCF rediscovery scan: "
- "fcf_evt_tag:x%x, fcf_index: "
- "x%x\n",
+ "2782 No suitable FCF found: "
+ "(x%x/x%x)\n",
phba->fcoe_eventtag_at_fcf_scan,
bf_get(lpfc_fcf_record_fcf_index,
new_fcf_record));
+ spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+ phba->hba_flag &= ~FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+ /* Unregister in-use FCF and rescan */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_FIP,
+ "2864 On devloss tmo "
+ "unreg in-use FCF and "
+ "rescan FCF table\n");
+ lpfc_unregister_fcf_rescan(phba);
+ return;
+ }
/*
- * Let next new FCF event trigger fast
- * failover
+ * Let next new FCF event trigger fast failover
*/
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~FCF_TS_INPROG;
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -2015,9 +2197,8 @@ read_next_fcf:
/* Replace in-use record with the new record */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2842 Replace the current in-use "
- "FCF record (x%x) with failover FCF "
- "record (x%x)\n",
+ "2842 Replace in-use FCF (x%x) "
+ "with failover FCF (x%x)\n",
phba->fcf.current_rec.fcf_indx,
phba->fcf.failover_rec.fcf_indx);
memcpy(&phba->fcf.current_rec,
@@ -2029,15 +2210,8 @@ read_next_fcf:
* FCF failover.
*/
spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &=
- ~(FCF_REDISC_FOV | FCF_REDISC_RRU);
+ phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
- /*
- * Set up the initial registered FCF index for FLOGI
- * round robin FCF failover.
- */
- phba->fcf.fcf_rr_init_indx =
- phba->fcf.failover_rec.fcf_indx;
/* Register to the new FCF record */
lpfc_register_fcf(phba);
} else {
@@ -2069,28 +2243,6 @@ read_next_fcf:
LPFC_FCOE_FCF_GET_FIRST);
return;
}
-
- /*
- * Otherwise, initial scan or post linkdown rescan,
- * register with the best FCF record found so far
- * through the FCF scanning process.
- */
-
- /*
- * Mark the initial FCF discovery completed and
- * the start of the first round of the roundrobin
- * FCF failover.
- */
- spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &=
- ~(FCF_INIT_DISC | FCF_REDISC_RRU);
- spin_unlock_irq(&phba->hbalock);
- /*
- * Set up the initial registered FCF index for FLOGI
- * round robin FCF failover
- */
- phba->fcf.fcf_rr_init_indx =
- phba->fcf.current_rec.fcf_indx;
/* Register to the new FCF record */
lpfc_register_fcf(phba);
}
@@ -2106,11 +2258,11 @@ out:
}
/**
- * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
*
- * This is the callback function for FLOGI failure round robin FCF failover
+ * This is the callback function for FLOGI failure roundrobin FCF failover
* read FCF record mailbox command from the eligible FCF record bmask for
* performing the failover. If the FCF read back is not valid/available, it
* fails through to retrying FLOGI to the currently registered FCF again.
@@ -2125,17 +2277,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
- uint16_t next_fcf_index;
+ uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
uint16_t vlan_id;
+ int rc;
- /* If link state is not up, stop the round robin failover process */
+ /* If link state is not up, stop the roundrobin failover process */
if (phba->link_state < LPFC_LINK_UP) {
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+ phba->hba_flag &= ~FCF_RR_INPROG;
spin_unlock_irq(&phba->hbalock);
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
- return;
+ goto out;
}
/* Parse the FCF record from the non-embedded mailbox command */
@@ -2145,23 +2298,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2766 Mailbox command READ_FCF_RECORD "
"failed to retrieve a FCF record.\n");
- goto out;
+ goto error_out;
}
/* Get the needed parameters from FCF record */
- lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
- &addr_mode, &vlan_id);
+ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+ &addr_mode, &vlan_id);
/* Log the FCF record information if turned on */
lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
next_fcf_index);
+ fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2848 Remove ineligible FCF (x%x) from "
+ "from roundrobin bmask\n", fcf_index);
+ /* Clear roundrobin bmask bit for ineligible FCF */
+ lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
+ /* Perform next round of roundrobin FCF failover */
+ fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+ rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
+ if (rc)
+ goto out;
+ goto error_out;
+ }
+
+ if (fcf_index == phba->fcf.current_rec.fcf_indx) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2760 Perform FLOGI roundrobin FCF failover: "
+ "FCF (x%x) back to FCF (x%x)\n",
+ phba->fcf.current_rec.fcf_indx, fcf_index);
+ /* Wait 500 ms before retrying FLOGI to current FCF */
+ msleep(500);
+ lpfc_initial_flogi(phba->pport);
+ goto out;
+ }
+
/* Upload new FCF record to the failover FCF record */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2834 Update the current FCF record (x%x) "
- "with the next FCF record (x%x)\n",
- phba->fcf.failover_rec.fcf_indx,
- bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+ "2834 Update current FCF (x%x) with new FCF (x%x)\n",
+ phba->fcf.failover_rec.fcf_indx, fcf_index);
spin_lock_irq(&phba->hbalock);
__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
new_fcf_record, addr_mode, vlan_id,
@@ -2178,14 +2355,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sizeof(struct lpfc_fcf_rec));
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2783 FLOGI round robin FCF failover from FCF "
- "(x%x) to FCF (x%x).\n",
- current_fcf_index,
- bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+ "2783 Perform FLOGI roundrobin FCF failover: FCF "
+ "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
+error_out:
+ lpfc_register_fcf(phba);
out:
lpfc_sli4_mbox_cmd_free(phba, mboxq);
- lpfc_register_fcf(phba);
}
/**
@@ -2194,10 +2370,10 @@ out:
* @mboxq: pointer to mailbox object.
*
* This is the callback function of read FCF record mailbox command for
- * updating the eligible FCF bmask for FLOGI failure round robin FCF
+ * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
* failover when a new FCF event happened. If the FCF read back is
* valid/available and it passes the connection list check, it updates
- * the bmask for the eligible FCF record for round robin failover.
+ * the bmask for the eligible FCF record for roundrobin failover.
*/
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
@@ -2639,7 +2815,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
* and get the FCF Table.
*/
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+ if (phba->hba_flag & FCF_TS_INPROG) {
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -3906,6 +4082,11 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport)
LPFC_MBOXQ_t *mbox;
int rc;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli4_unreg_all_rpis(vport);
+ return;
+ }
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
@@ -3992,6 +4173,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
}
spin_lock_irq(&phba->hbalock);
+ /* Cleanup REG_LOGIN completions which are not yet processed */
+ list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+ if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+ (ndlp != (struct lpfc_nodelist *) mb->context2))
+ continue;
+
+ mb->context2 = NULL;
+ mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ }
+
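A REG_LOGIN64 that has already completed but not yet been processed still points at the node being cleaned up; re-pointing its completion to lpfc_sli_def_mbox_cmpl lets the default handler free the mailbox resources without touching the soon-to-be-freed ndlp. The same check, restated with comments:

    list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
        if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
            mb->context2 == (void *) ndlp) {
            mb->context2 = NULL;                     /* detach the node  */
            mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;  /* default cleanup  */
        }
    }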
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -5170,6 +5361,8 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
if (ndlp)
lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
lpfc_cleanup_pending_mbox(vports[i]);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli4_unreg_all_rpis(vports[i]);
lpfc_mbx_unreg_vpi(vports[i]);
shost = lpfc_shost_from_vport(vports[i]);
spin_lock_irq(shost->host_lock);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index a631647051d9..9b8333456465 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -861,6 +861,47 @@ typedef struct _RPS_RSP { /* Structure is in Big Endian format */
uint32_t crcCnt;
} RPS_RSP;
+struct RLS { /* Structure is in Big Endian format */
+ uint32_t rls;
+#define rls_rsvd_SHIFT 24
+#define rls_rsvd_MASK 0x000000ff
+#define rls_rsvd_WORD rls
+#define rls_did_SHIFT 0
+#define rls_did_MASK 0x00ffffff
+#define rls_did_WORD rls
+};
+
+struct RLS_RSP { /* Structure is in Big Endian format */
+ uint32_t linkFailureCnt;
+ uint32_t lossSyncCnt;
+ uint32_t lossSignalCnt;
+ uint32_t primSeqErrCnt;
+ uint32_t invalidXmitWord;
+ uint32_t crcCnt;
+};
+
+struct RTV_RSP { /* Structure is in Big Endian format */
+ uint32_t ratov;
+ uint32_t edtov;
+ uint32_t qtov;
+#define qtov_rsvd0_SHIFT 28
+#define qtov_rsvd0_MASK 0x0000000f
+#define qtov_rsvd0_WORD qtov /* reserved */
+#define qtov_edtovres_SHIFT 27
+#define qtov_edtovres_MASK 0x00000001
+#define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */
+#define qtov_rsvd1_SHIFT 19
+#define qtov_rsvd1_MASK 0x0000003f
+#define qtov_rsvd1_WORD qtov /* reserved */
+#define qtov_rttov_SHIFT 18
+#define qtov_rttov_MASK 0x00000001
+#define qtov_rttov_WORD qtov /* R_T_TOV value */
+#define qtov_rsvd2_SHIFT 0
+#define qtov_rsvd2_MASK 0x0003ffff
+#define qtov_rsvd2_WORD qtov /* reserved */
+};
+
+
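The qtov_* triplets follow the driver's SHIFT/MASK/WORD convention consumed by the bf_set()/bf_get() accessors in lpfc_hw4.h. Roughly, bf_set(qtov_edtovres, rtv_rsp, v) expands to (sketch, from the accessor definition):

    rtv_rsp->qtov = (((v) & qtov_edtovres_MASK) << qtov_edtovres_SHIFT) |
                    (rtv_rsp->qtov &
                     ~(qtov_edtovres_MASK << qtov_edtovres_SHIFT));

which is why lpfc_els_rcv_rtv() byte-swaps qtov only once, after all bf_set() calls.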
typedef struct _RPL { /* Structure is in Big Endian format */
uint32_t maxsize;
uint32_t index;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bbdcf96800f6..6e4bc34e1d0d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -424,79 +424,6 @@ struct lpfc_rcqe {
#define FCOE_SOFn3 0x36
};
-struct lpfc_wqe_generic{
- struct ulp_bde64 bde;
- uint32_t word3;
- uint32_t word4;
- uint32_t word5;
- uint32_t word6;
-#define lpfc_wqe_gen_context_SHIFT 16
-#define lpfc_wqe_gen_context_MASK 0x0000FFFF
-#define lpfc_wqe_gen_context_WORD word6
-#define lpfc_wqe_gen_xri_SHIFT 0
-#define lpfc_wqe_gen_xri_MASK 0x0000FFFF
-#define lpfc_wqe_gen_xri_WORD word6
- uint32_t word7;
-#define lpfc_wqe_gen_lnk_SHIFT 23
-#define lpfc_wqe_gen_lnk_MASK 0x00000001
-#define lpfc_wqe_gen_lnk_WORD word7
-#define lpfc_wqe_gen_erp_SHIFT 22
-#define lpfc_wqe_gen_erp_MASK 0x00000001
-#define lpfc_wqe_gen_erp_WORD word7
-#define lpfc_wqe_gen_pu_SHIFT 20
-#define lpfc_wqe_gen_pu_MASK 0x00000003
-#define lpfc_wqe_gen_pu_WORD word7
-#define lpfc_wqe_gen_class_SHIFT 16
-#define lpfc_wqe_gen_class_MASK 0x00000007
-#define lpfc_wqe_gen_class_WORD word7
-#define lpfc_wqe_gen_command_SHIFT 8
-#define lpfc_wqe_gen_command_MASK 0x000000FF
-#define lpfc_wqe_gen_command_WORD word7
-#define lpfc_wqe_gen_status_SHIFT 4
-#define lpfc_wqe_gen_status_MASK 0x0000000F
-#define lpfc_wqe_gen_status_WORD word7
-#define lpfc_wqe_gen_ct_SHIFT 2
-#define lpfc_wqe_gen_ct_MASK 0x00000003
-#define lpfc_wqe_gen_ct_WORD word7
- uint32_t abort_tag;
- uint32_t word9;
-#define lpfc_wqe_gen_request_tag_SHIFT 0
-#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF
-#define lpfc_wqe_gen_request_tag_WORD word9
- uint32_t word10;
-#define lpfc_wqe_gen_ccp_SHIFT 24
-#define lpfc_wqe_gen_ccp_MASK 0x000000FF
-#define lpfc_wqe_gen_ccp_WORD word10
-#define lpfc_wqe_gen_ccpe_SHIFT 23
-#define lpfc_wqe_gen_ccpe_MASK 0x00000001
-#define lpfc_wqe_gen_ccpe_WORD word10
-#define lpfc_wqe_gen_pv_SHIFT 19
-#define lpfc_wqe_gen_pv_MASK 0x00000001
-#define lpfc_wqe_gen_pv_WORD word10
-#define lpfc_wqe_gen_pri_SHIFT 16
-#define lpfc_wqe_gen_pri_MASK 0x00000007
-#define lpfc_wqe_gen_pri_WORD word10
- uint32_t word11;
-#define lpfc_wqe_gen_cq_id_SHIFT 16
-#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF
-#define lpfc_wqe_gen_cq_id_WORD word11
-#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
-#define lpfc_wqe_gen_wqec_SHIFT 7
-#define lpfc_wqe_gen_wqec_MASK 0x00000001
-#define lpfc_wqe_gen_wqec_WORD word11
-#define ELS_ID_FLOGI 3
-#define ELS_ID_FDISC 2
-#define ELS_ID_LOGO 1
-#define ELS_ID_DEFAULT 0
-#define lpfc_wqe_gen_els_id_SHIFT 4
-#define lpfc_wqe_gen_els_id_MASK 0x00000003
-#define lpfc_wqe_gen_els_id_WORD word11
-#define lpfc_wqe_gen_cmd_type_SHIFT 0
-#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F
-#define lpfc_wqe_gen_cmd_type_WORD word11
- uint32_t payload[4];
-};
-
struct lpfc_rqe {
uint32_t address_hi;
uint32_t address_lo;
@@ -2279,9 +2206,36 @@ struct wqe_common {
#define wqe_reqtag_MASK 0x0000FFFF
#define wqe_reqtag_WORD word9
#define wqe_rcvoxid_SHIFT 16
-#define wqe_rcvoxid_MASK 0x0000FFFF
-#define wqe_rcvoxid_WORD word9
+#define wqe_rcvoxid_MASK 0x0000FFFF
+#define wqe_rcvoxid_WORD word9
uint32_t word10;
+#define wqe_ebde_cnt_SHIFT 0
+#define wqe_ebde_cnt_MASK 0x00000007
+#define wqe_ebde_cnt_WORD word10
+#define wqe_lenloc_SHIFT 7
+#define wqe_lenloc_MASK 0x00000003
+#define wqe_lenloc_WORD word10
+#define LPFC_WQE_LENLOC_NONE 0
+#define LPFC_WQE_LENLOC_WORD3 1
+#define LPFC_WQE_LENLOC_WORD12 2
+#define LPFC_WQE_LENLOC_WORD4 3
+#define wqe_qosd_SHIFT 9
+#define wqe_qosd_MASK 0x00000001
+#define wqe_qosd_WORD word10
+#define wqe_xbl_SHIFT 11
+#define wqe_xbl_MASK 0x00000001
+#define wqe_xbl_WORD word10
+#define wqe_iod_SHIFT 13
+#define wqe_iod_MASK 0x00000001
+#define wqe_iod_WORD word10
+#define LPFC_WQE_IOD_WRITE 0
+#define LPFC_WQE_IOD_READ 1
+#define wqe_dbde_SHIFT 14
+#define wqe_dbde_MASK 0x00000001
+#define wqe_dbde_WORD word10
+#define wqe_wqes_SHIFT 15
+#define wqe_wqes_MASK 0x00000001
+#define wqe_wqes_WORD word10
#define wqe_pri_SHIFT 16
#define wqe_pri_MASK 0x00000007
#define wqe_pri_WORD word10
@@ -2295,18 +2249,26 @@ struct wqe_common {
#define wqe_ccpe_MASK 0x00000001
#define wqe_ccpe_WORD word10
#define wqe_ccp_SHIFT 24
-#define wqe_ccp_MASK 0x000000ff
-#define wqe_ccp_WORD word10
+#define wqe_ccp_MASK 0x000000ff
+#define wqe_ccp_WORD word10
uint32_t word11;
-#define wqe_cmd_type_SHIFT 0
-#define wqe_cmd_type_MASK 0x0000000f
-#define wqe_cmd_type_WORD word11
-#define wqe_wqec_SHIFT 7
-#define wqe_wqec_MASK 0x00000001
-#define wqe_wqec_WORD word11
-#define wqe_cqid_SHIFT 16
-#define wqe_cqid_MASK 0x0000ffff
-#define wqe_cqid_WORD word11
+#define wqe_cmd_type_SHIFT 0
+#define wqe_cmd_type_MASK 0x0000000f
+#define wqe_cmd_type_WORD word11
+#define wqe_els_id_SHIFT 4
+#define wqe_els_id_MASK 0x00000003
+#define wqe_els_id_WORD word11
+#define LPFC_ELS_ID_FLOGI 3
+#define LPFC_ELS_ID_FDISC 2
+#define LPFC_ELS_ID_LOGO 1
+#define LPFC_ELS_ID_DEFAULT 0
+#define wqe_wqec_SHIFT 7
+#define wqe_wqec_MASK 0x00000001
+#define wqe_wqec_WORD word11
+#define wqe_cqid_SHIFT 16
+#define wqe_cqid_MASK 0x0000ffff
+#define wqe_cqid_WORD word11
+#define LPFC_WQE_CQ_ID_DEFAULT 0xffff
};
struct wqe_did {
@@ -2325,6 +2287,15 @@ struct wqe_did {
#define wqe_xmit_bls_xo_WORD word5
};
+struct lpfc_wqe_generic{
+ struct ulp_bde64 bde;
+ uint32_t word3;
+ uint32_t word4;
+ uint32_t word5;
+ struct wqe_common wqe_com;
+ uint32_t payload[4];
+};
+
struct els_request64_wqe {
struct ulp_bde64 bde;
uint32_t payload_len;
@@ -2356,9 +2327,9 @@ struct els_request64_wqe {
struct xmit_els_rsp64_wqe {
struct ulp_bde64 bde;
- uint32_t rsvd3;
+ uint32_t response_payload_len;
uint32_t rsvd4;
- struct wqe_did wqe_dest;
+ struct wqe_did wqe_dest;
struct wqe_common wqe_com; /* words 6-11 */
uint32_t rsvd_12_15[4];
};
@@ -2427,7 +2398,7 @@ struct wqe_rctl_dfctl {
struct xmit_seq64_wqe {
struct ulp_bde64 bde;
- uint32_t paylaod_offset;
+ uint32_t rsvd3;
uint32_t relative_offset;
struct wqe_rctl_dfctl wge_ctl;
struct wqe_common wqe_com; /* words 6-11 */
@@ -2437,7 +2408,7 @@ struct xmit_seq64_wqe {
};
struct xmit_bcast64_wqe {
struct ulp_bde64 bde;
- uint32_t paylaod_len;
+ uint32_t seq_payload_len;
uint32_t rsvd4;
struct wqe_rctl_dfctl wge_ctl; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
@@ -2446,8 +2417,8 @@ struct xmit_bcast64_wqe {
struct gen_req64_wqe {
struct ulp_bde64 bde;
- uint32_t command_len;
- uint32_t payload_len;
+ uint32_t request_payload_len;
+ uint32_t relative_offset;
struct wqe_rctl_dfctl wge_ctl; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
uint32_t rsvd_12_15[4];
@@ -2480,7 +2451,7 @@ struct abort_cmd_wqe {
struct fcp_iwrite64_wqe {
struct ulp_bde64 bde;
- uint32_t payload_len;
+ uint32_t payload_offset_len;
uint32_t total_xfer_len;
uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */
@@ -2489,7 +2460,7 @@ struct fcp_iwrite64_wqe {
struct fcp_iread64_wqe {
struct ulp_bde64 bde;
- uint32_t payload_len; /* word 3 */
+ uint32_t payload_offset_len; /* word 3 */
uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
@@ -2497,10 +2468,12 @@ struct fcp_iread64_wqe {
};
struct fcp_icmnd64_wqe {
- struct ulp_bde64 bde; /* words 0-2 */
- uint32_t rsrvd[3]; /* words 3-5 */
+ struct ulp_bde64 bde; /* words 0-2 */
+ uint32_t rsrvd3; /* word 3 */
+ uint32_t rsrvd4; /* word 4 */
+ uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
};
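
The SHIFT/MASK/WORD triplets above follow the driver's bf_set()/bf_get() accessor convention: each named field lives in the 32-bit word named by its _WORD define, at the bit position given by _SHIFT, with the width given by _MASK. A minimal open-coded sketch of that pattern (illustrative only; the driver's real accessors are token-pasting macros, not these helpers):

/* Equivalent bit manipulation behind a SHIFT/MASK/WORD accessor pair. */
static inline void demo_bf_set(uint32_t *word, uint32_t shift,
			       uint32_t mask, uint32_t value)
{
	*word = (*word & ~(mask << shift)) | ((value & mask) << shift);
}

static inline uint32_t demo_bf_get(uint32_t word, uint32_t shift,
				   uint32_t mask)
{
	return (word >> shift) & mask;
}

/* For example, setting lenloc (mask 0x3 at shift 7 in word10) to
 * LPFC_WQE_LENLOC_WORD4 (3):
 *	demo_bf_set(&word10, wqe_lenloc_SHIFT, wqe_lenloc_MASK, 3);
 */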
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 295c7ddb36c1..b3065791f303 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -813,6 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba)
return 0;
}
+
/**
* lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
* @phba: pointer to lpfc HBA data structure.
@@ -2234,10 +2235,9 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
- /* Clear pending FCF rediscovery wait and failover in progress flags */
- phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
- FCF_DEAD_DISC |
- FCF_ACVL_DISC);
+ /* Clear pending FCF rediscovery wait flag */
+ phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+
/* Now, try to stop the timer */
del_timer(&phba->fcf.redisc_wait);
}
@@ -2261,6 +2261,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
return;
}
__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+ /* Clear failover in progress flags */
+ phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
spin_unlock_irq(&phba->hbalock);
}
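
The split above follows the kernel's locking-prefix convention: the double-underscore variant assumes the caller already holds phba->hbalock, while the plain-named wrapper takes the lock itself. A minimal sketch of the pairing (illustrative names; not the driver's exact bodies):

/* Caller must hold phba->hbalock. */
static void __demo_stop_redisc_timer(struct lpfc_hba *phba)
{
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	del_timer(&phba->fcf.redisc_wait);
}

/* Lock-taking wrapper for callers that do not hold hbalock. */
static void demo_stop_redisc_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	__demo_stop_redisc_timer(phba);
	spin_unlock_irq(&phba->hbalock);
}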
@@ -2935,8 +2937,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
phba->fcf.fcf_flag |= FCF_REDISC_EVT;
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2776 FCF rediscover wait timer expired, post "
- "a worker thread event for FCF table scan\n");
+ "2776 FCF rediscover quiescent timer expired\n");
/* wake up worker thread */
lpfc_worker_wake_up(phba);
}
@@ -3311,35 +3312,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
LOG_DISCOVERY,
- "2546 New FCF found event: "
- "evt_tag:x%x, fcf_index:x%x\n",
+ "2546 New FCF event, evt_tag:x%x, "
+ "index:x%x\n",
acqe_fcoe->event_tag,
acqe_fcoe->index);
else
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
LOG_DISCOVERY,
- "2788 FCF parameter modified event: "
- "evt_tag:x%x, fcf_index:x%x\n",
+ "2788 FCF param modified event, "
+ "evt_tag:x%x, index:x%x\n",
acqe_fcoe->event_tag,
acqe_fcoe->index);
if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
/*
 * During the period of FCF discovery, read the FCF
* table record indexed by the event to update
- * FCF round robin failover eligible FCF bmask.
+ * FCF roundrobin failover eligible FCF bmask.
*/
lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
LOG_DISCOVERY,
- "2779 Read new FCF record with "
- "fcf_index:x%x for updating FCF "
- "round robin failover bmask\n",
+ "2779 Read FCF (x%x) for updating "
+ "roundrobin FCF failover bmask\n",
acqe_fcoe->index);
rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
}
/* If the FCF discovery is in progress, do nothing. */
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+ if (phba->hba_flag & FCF_TS_INPROG) {
spin_unlock_irq(&phba->hbalock);
break;
}
@@ -3358,15 +3358,15 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
/* Otherwise, scan the entire FCF table and re-discover SAN */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
- "2770 Start FCF table scan due to new FCF "
- "event: evt_tag:x%x, fcf_index:x%x\n",
+ "2770 Start FCF table scan per async FCF "
+ "event, evt_tag:x%x, index:x%x\n",
acqe_fcoe->event_tag, acqe_fcoe->index);
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc)
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
"2547 Issue FCF scan read FCF mailbox "
- "command failed 0x%x\n", rc);
+ "command failed (x%x)\n", rc);
break;
case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3378,9 +3378,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
- "2549 FCF disconnected from network index 0x%x"
- " tag 0x%x\n", acqe_fcoe->index,
- acqe_fcoe->event_tag);
+ "2549 FCF (x%x) disconnected from network, "
+ "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
/*
* If we are in the middle of FCF failover process, clear
* the corresponding FCF bit in the roundrobin bitmap.
@@ -3494,9 +3493,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
LOG_DISCOVERY,
- "2773 Start FCF fast failover due "
- "to CVL event: evt_tag:x%x\n",
- acqe_fcoe->event_tag);
+ "2773 Start FCF failover per CVL, "
+ "evt_tag:x%x\n", acqe_fcoe->event_tag);
rc = lpfc_sli4_redisc_fcf_table(phba);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3646,8 +3644,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
/* Scan FCF table from the first entry to re-discover SAN */
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
- "2777 Start FCF table scan after FCF "
- "rediscovery quiescent period over\n");
+ "2777 Start post-quiescent FCF table scan\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
if (rc)
lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
@@ -4165,7 +4162,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_active_sgl;
}
- /* Allocate eligible FCF bmask memory for FCF round robin failover */
+ /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
GFP_KERNEL);
@@ -7271,6 +7268,51 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to wait for completion
+ * of the device's XRI exchange-busy conditions. It checks the XRI
+ * exchange busy on outstanding FCP and ELS I/Os every 10ms for up to
+ * 10 seconds; after that, it checks every 30 seconds, logs an error
+ * message, and waits indefinitely. Only when all XRI exchange-busy
+ * conditions have completed does the driver unload proceed with
+ * invoking the function reset ioctl mailbox command to the CNA and
+ * the rest of the driver unload resource release.
+ **/
+static void
+lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
+{
+ int wait_time = 0;
+ int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+ while (!fcp_xri_cmpl || !els_xri_cmpl) {
+ if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+ if (!fcp_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2877 FCP XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
+ if (!els_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2878 ELS XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
+ wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
+ } else {
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+ wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
+ }
+ fcp_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ els_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ }
+}
+
+/**
* lpfc_sli4_hba_unset - Unset the fcoe hba
* @phba: Pointer to HBA context object.
*
@@ -7315,6 +7357,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
+ /* Abort all iocbs associated with the hba */
+ lpfc_sli_hba_iocb_abort(phba);
+
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
+
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 0dfa310cd609..62d0957e1d4c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -797,6 +797,34 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
}
/**
+ * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
+ * @vport: pointer to a vport object.
+ *
+ * This routine sends mailbox command to unregister all active RPIs for
+ * a vport.
+ **/
+void
+lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ LPFC_MBOXQ_t *mbox;
+ int rc;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_unreg_login(phba, vport->vpi,
+ vport->vpi + phba->vpi_base, mbox);
+ mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
+ mbox->vport = vport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+}
+
+/**
* lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
* @phba: pointer to lpfc hba data structure.
* @vpi: virtual N_Port identifier.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3a658953486c..f64b65a770b8 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -169,6 +169,7 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
spin_lock_irqsave(shost->host_lock, flags);
if (!vport->stat_data_enabled ||
vport->stat_data_blocked ||
+ !pnode ||
!pnode->lat_data ||
(phba->bucket_type == LPFC_NO_BUCKET)) {
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -2040,6 +2041,9 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
unsigned long flags;
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return;
+
/* If there is queuefull or busy condition send a scsi event */
if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
(cmnd->result == SAM_STAT_BUSY)) {
@@ -3226,10 +3230,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
struct lpfc_scsi_buf *lpfc_cmd;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *iocbqrsp;
+ struct lpfc_nodelist *pnode = rdata->pnode;
int ret;
int status;
- if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
return FAILED;
lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -3256,7 +3261,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
"0702 Issue %s to TGT %d LUN %d "
"rpi x%x nlp_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
- rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
+ pnode->nlp_rpi, pnode->nlp_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0d1e187b005d..554efa6623f4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -95,7 +95,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
return -ENOMEM;
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
- bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+ bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
@@ -1735,6 +1735,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
uint16_t rpi, vpi;
int rc;
@@ -1746,7 +1747,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
- (phba->sli_rev == LPFC_SLI_REV4))
+ (phba->sli_rev == LPFC_SLI_REV4) &&
+ (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
/*
@@ -1765,16 +1767,14 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- /* Unreg VPI, if the REG_VPI succeed after VLink failure */
if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
!(phba->pport->load_flag & FC_UNLOADING) &&
!pmb->u.mb.mbxStatus) {
- lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
- pmb->vport = vport;
- pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc != MBX_NOT_FINISHED)
- return;
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(shost->host_lock);
}
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
@@ -5921,7 +5921,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
* lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
* @phba: Pointer to HBA context object.
*
- * This routine performs a round robin SCSI command to SLI4 FCP WQ index
+ * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
* distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
* held.
*
@@ -5965,7 +5965,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
uint16_t abrt_iotag;
struct lpfc_iocbq *abrtiocbq;
struct ulp_bde64 *bpl = NULL;
- uint32_t els_id = ELS_ID_DEFAULT;
+ uint32_t els_id = LPFC_ELS_ID_DEFAULT;
int numBdes, i;
struct ulp_bde64 bde;
@@ -5982,7 +5982,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
- wqe->words[7] = 0; /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
/* words0-2 bpl convert bde */
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -6033,109 +6033,117 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* contains the FCFI and remote N_Port_ID is
* in word 5.
*/
-
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- bf_set(lpfc_wqe_gen_context, &wqe->generic,
- iocbq->iocb.ulpContext);
-
- bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+ bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
-
if (command_type == ELS_COMMAND_FIP) {
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
}
- bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
-
+ bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+ bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
break;
case CMD_XMIT_SEQUENCE64_CX:
- bf_set(lpfc_wqe_gen_context, &wqe->generic,
- iocbq->iocb.un.ulpWord[3]);
- wqe->generic.word3 = 0;
- bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+ iocbq->iocb.un.ulpWord[3]);
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
+ iocbq->iocb.ulpContext);
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
case CMD_XMIT_SEQUENCE64_CR:
- /* word3 iocb=io_tag32 wqe=payload_offset */
- /* payload offset used for multilpe outstanding
- * sequences on the same exchange
- */
- wqe->words[3] = 0;
+ /* word3 iocb=io_tag32 wqe=reserved */
+ wqe->xmit_sequence.rsvd3 = 0;
/* word4 relative_offset memcpy */
/* word5 r_ctl/df_ctl memcpy */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
wqe->xmit_sequence.xmit_len = xmit_len;
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_BCAST64_CN:
- /* word3 iocb=iotag32 wqe=payload_len */
- wqe->words[3] = 0; /* no definition for this in wqe */
+ /* word3 iocb=iotag32 wqe=seq_payload_len */
+ wqe->xmit_bcast64.seq_payload_len = xmit_len;
/* word4 iocb=rsvd wqe=rsvd */
/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
break;
case CMD_FCP_IWRITE64_CR:
command_type = FCP_COMMAND_DATA_OUT;
- /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
- * confusing.
- * word3 is payload_len: byte offset to the sgl entry for the
- * fcp_command.
- * word4 is total xfer len, same as the IOCB->ulpParameter.
- * word5 is initial xfer len 0 = wait for xfer-ready
- */
-
- /* Always wait for xfer-ready before sending data */
- wqe->fcp_iwrite.initial_xfer_len = 0;
- /* word 4 (xfer length) should have been set on the memcpy */
-
- /* allow write to fall through to read */
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ wqe->fcp_iwrite.payload_offset_len =
+ xmit_len + sizeof(struct fcp_rsp);
+ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+ bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
+ /* Always open the exchange */
+ bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+ break;
case CMD_FCP_IREAD64_CR:
- /* FCP_CMD is always the 1st sgl entry */
- wqe->fcp_iread.payload_len =
+ /* word3 iocb=iotag wqe=payload_offset_len */
+ /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+ wqe->fcp_iread.payload_offset_len =
xmit_len + sizeof(struct fcp_rsp);
-
- /* word 4 (xfer length) should have been set on the memcpy */
-
- bf_set(lpfc_wqe_gen_erp, &wqe->generic,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
- /* The XC bit and the XS bit are similar. The driver never
- * tracked whether or not the exchange was previouslly open.
- * XC = Exchange create, 0 is create. 1 is already open.
- * XS = link cmd: 1 do not close the exchange after command.
- * XS = 0 close exchange when command completes.
- * The only time we would not set the XC bit is when the XS bit
- * is set and we are sending our 2nd or greater command on
- * this exchange.
- */
+ /* word4 iocb=parameter wqe=total_xfer_length memcpy */
+ /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+ bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
+ iocbq->iocb.ulpFCP2Rcvy);
+ bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
- wqe->words[10] &= 0xffff0000; /* zero out ebde count */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
- break;
+ bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+ break;
case CMD_FCP_ICMND64_CR:
+ /* word3 iocb=IO_TAG wqe=reserved */
+ wqe->fcp_icmd.rsrvd3 = 0;
+ bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
/* Always open the exchange */
- bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
- wqe->words[4] = 0;
- wqe->words[10] &= 0xffff0000; /* zero out ebde count */
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+ bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
break;
case CMD_GEN_REQUEST64_CR:
- /* word3 command length is described as byte offset to the
- * rsp_data. Would always be 16, sizeof(struct sli4_sge)
- * sgl[0] = cmnd
- * sgl[1] = rsp.
- *
- */
- wqe->gen_req.command_len = xmit_len;
- /* Word4 parameter copied in the memcpy */
- /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+ /* word3 iocb=IO_TAG wqe=request_payload_len */
+ wqe->gen_req.request_payload_len = xmit_len;
+ /* word4 iocb=parameter wqe=relative_offset memcpy */
+ /* word5 [rctl, type, df_ctl, la] copied in memcpy */
/* word6 context tag copied in memcpy */
if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
@@ -6144,31 +6152,39 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
ct, iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
- bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
- iocbq->iocb.ulpTimeout);
-
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+ bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
+ bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
+ bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_ELS_RSP64_CX:
/* words0-2 BDE memcpy */
- /* word3 iocb=iotag32 wqe=rsvd */
- wqe->words[3] = 0;
+ /* word3 iocb=iotag32 wqe=response_payload_len */
+ wqe->xmit_els_rsp.response_payload_len = xmit_len;
 /* word4 iocb=did wqe=rsvd. */
- wqe->words[4] = 0;
+ wqe->xmit_els_rsp.rsvd4 = 0;
 /* word5 iocb=rsvd wqe=did */
bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
iocbq->iocb.un.elsreq64.remoteID);
-
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
-
- bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
- bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
+ ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+ bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
- bf_set(lpfc_wqe_gen_context, &wqe->generic,
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
iocbq->vport->vpi + phba->vpi_base);
+ bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -6193,15 +6209,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
else
bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- wqe->words[5] = 0;
- bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+ /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
+ wqe->abort_cmd.rsrvd5 = 0;
+ bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
/*
* The abort handler will send us CMD_ABORT_XRI_CN or
* CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
*/
- bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
cmnd = CMD_ABORT_XRI_CX;
command_type = OTHER_COMMAND;
xritag = 0;
@@ -6235,18 +6255,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
iocbq->iocb.ulpContext);
+ bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
 /* Overwrite the pre-set command type with OTHER_COMMAND */
command_type = OTHER_COMMAND;
break;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
- /* words0-2 are all 0's no bde */
- /* word3 and word4 are rsvrd */
- wqe->words[3] = 0;
- wqe->words[4] = 0;
- /* word5 iocb=rsvd wge=did */
- /* There is no remote port id in the IOCB? */
- /* Let this fall through and fail */
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
case CMD_FCP_TRSP64_CX: /* Target mode rcv */
@@ -6257,16 +6273,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
break;
-
}
- bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
- bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
- wqe->generic.abort_tag = abort_tag;
- bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
- bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
- bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
- bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
-
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+ wqe->generic.wqe_com.abort_tag = abort_tag;
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
+ bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
return 0;
}
@@ -7257,25 +7271,26 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
/**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @cmdiocb: Pointer to driver command iocb object.
*
- * This function issues an abort iocb for the provided command
- * iocb. This function is called with hbalock held.
- * The function returns 0 when it fails due to memory allocation
- * failure or when the command iocb is an abort request.
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Unless the outstanding command iocb is itself an abort
+ * request, this function issues the abort unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
**/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *cmdiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
- int retval = IOCB_ERROR;
+ int retval;
/*
* There are certain command types we don't want to abort. And we
@@ -7288,18 +7303,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
- /* If we're unloading, don't abort iocb on the ELS ring, but change the
- * callback so that nothing happens when it finishes.
- */
- if ((vport->load_flag & FC_UNLOADING) &&
- (pring->ringno == LPFC_ELS_RING)) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
- goto abort_iotag_exit;
- }
-
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
@@ -7344,6 +7347,63 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (retval)
__lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+ /*
+ * Caller to this routine should check for IOCB_ERROR
+ * and handle it properly. This routine no longer removes
+ * iocb off txcmplq and call compl in case of IOCB_ERROR.
+ */
+ return retval;
+}
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. When
+ * the driver is unloading, no abort iocb is issued for commands on the ELS
+ * ring; instead, the completion callback of those commands is changed so
+ * that nothing happens when they finish. This function is called with
+ * hbalock held. The function returns 0 when the command iocb is an abort
+ * request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ int retval = IOCB_ERROR;
+ IOCB_t *icmd = NULL;
+
+ /*
+ * There are certain command types we don't want to abort. And we
+ * don't want to abort commands that are already in the process of
+ * being aborted.
+ */
+ icmd = &cmdiocb->iocb;
+ if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+ icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+ (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+ return 0;
+
+ /*
+ * If we're unloading, don't abort iocb on the ELS ring, but change
+ * the callback so that nothing happens when it finishes.
+ */
+ if ((vport->load_flag & FC_UNLOADING) &&
+ (pring->ringno == LPFC_ELS_RING)) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ goto abort_iotag_exit;
+ }
+
+ /* Now, we try to issue the abort to the cmdiocb out */
+ retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
@@ -7354,6 +7414,62 @@ abort_iotag_exit:
}
/**
+ * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues abort iocbs unconditionally for all
+ * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
+ * to complete before the return of this function. The caller is not required
+ * to hold any locks.
+ **/
+static void
+lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *iocb, *next_iocb;
+
+ if (pring->ringno == LPFC_ELS_RING)
+ lpfc_fabric_abort_hba(phba);
+
+ spin_lock_irq(&phba->hbalock);
+
+ /* Take off all the iocbs on txq for cancelling */
+ list_splice_init(&pring->txq, &completions);
+ pring->txq_cnt = 0;
+
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli_abort_iotag_issue(phba, pring, iocb);
+
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs to an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
+ int i;
+
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->ring[i];
+ lpfc_sli_iocb_ring_abort(phba, pring);
+ }
+}
+
+/**
* lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
* @iocbq: Pointer to driver iocb object.
* @vport: Pointer to driver virtual port object.
@@ -12242,13 +12358,15 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
/* Issue the mailbox command asynchronously */
mboxq->vport = phba->pport;
mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
+
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= FCF_TS_INPROG;
+ spin_unlock_irq(&phba->hbalock);
+
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
error = -EIO;
else {
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= FCF_DISC_INPROGRESS;
- spin_unlock_irq(&phba->hbalock);
/* Reset eligible FCF count for new scan */
if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
phba->fcf.eligible_fcf_cnt = 0;
@@ -12258,21 +12376,21 @@ fail_fcf_scan:
if (error) {
if (mboxq)
lpfc_sli4_mbox_cmd_free(phba, mboxq);
- /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+ /* FCF scan failed, clear FCF_TS_INPROG flag */
spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+ phba->hba_flag &= ~FCF_TS_INPROG;
spin_unlock_irq(&phba->hbalock);
}
return error;
}
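
Raising FCF_TS_INPROG before lpfc_sli_issue_mbox() closes the window in which the asynchronous scan completion could run before the flag was set. Condensed from the two hunks above (error-path labels elided):

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;	/* mark the scan before issuing */
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		/* Mailbox never reached the port: roll the flag back. */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}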
/**
- * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: FCF table entry offset.
*
* This routine is invoked to read an FCF record indicated by @fcf_index
- * and to use it for FLOGI round robin FCF failover.
+ * and to use it for FLOGI roundrobin FCF failover.
*
 * Return 0 if the mailbox command is submitted successfully, non-zero
* otherwise.
@@ -12318,7 +12436,7 @@ fail_fcf_read:
* @fcf_index: FCF table entry offset.
*
* This routine is invoked to read an FCF record indicated by @fcf_index to
- * determine whether it's eligible for FLOGI round robin failover list.
+ * determine whether it's eligible for FLOGI roundrobin failover list.
*
 * Return 0 if the mailbox command is submitted successfully, non-zero
* otherwise.
@@ -12364,7 +12482,7 @@ fail_fcf_read:
*
 * This routine gets the next eligible FCF record index in a roundrobin
 * fashion. If the next eligible FCF record index equals the
- * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
* shall be returned, otherwise, the next eligible FCF record's index
* shall be returned.
**/
@@ -12392,28 +12510,10 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
return LPFC_FCOE_FCF_NEXT_NONE;
}
- /* Check roundrobin failover index bmask stop condition */
- if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
- if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
- "2847 Round robin failover FCF index "
- "search hit stop condition:x%x\n",
- next_fcf_index);
- return LPFC_FCOE_FCF_NEXT_NONE;
- }
- /* The roundrobin failover index bmask updated, start over */
- lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2848 Round robin failover FCF index bmask "
- "updated, start over\n");
- spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
- spin_unlock_irq(&phba->hbalock);
- return phba->fcf.fcf_rr_init_indx;
- }
-
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2845 Get next round robin failover "
- "FCF index x%x\n", next_fcf_index);
+ "2845 Get next roundrobin failover FCF (x%x)\n",
+ next_fcf_index);
+
return next_fcf_index;
}
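
With the FCF_REDISC_RRU stop condition removed, the next-index lookup reduces to a circular scan of the eligibility bmask. A sketch of that wrap-around search, assuming the standard Linux bitmap API (logging and validity checks elided; the current record's fcf_indx is the starting point):

	/* Find the next eligible index after the current one, wrapping once. */
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       phba->fcf.current_rec.fcf_indx + 1);
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		return LPFC_FCOE_FCF_NEXT_NONE;	/* bmask empty: no eligible FCF */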
@@ -12422,7 +12522,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine sets the FCF record index in to the eligible bmask for
- * round robin failover search. It checks to make sure that the index
+ * roundrobin failover search. It checks to make sure that the index
* does not go beyond the range of the driver allocated bmask dimension
* before setting the bit.
*
@@ -12434,22 +12534,16 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
- "2610 HBA FCF index reached driver's "
- "book keeping dimension: fcf_index:%d, "
- "driver_bmask_max:%d\n",
+ "2610 FCF (x%x) reached driver's book "
+ "keeping dimension:x%x\n",
fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
return -EINVAL;
}
/* Set the eligible FCF record index bmask */
set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
- /* Set the roundrobin index bmask updated */
- spin_lock_irq(&phba->hbalock);
- phba->fcf.fcf_flag |= FCF_REDISC_RRU;
- spin_unlock_irq(&phba->hbalock);
-
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2790 Set FCF index x%x to round robin failover "
+ "2790 Set FCF (x%x) to roundrobin FCF failover "
"bmask\n", fcf_index);
return 0;
@@ -12460,7 +12554,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
* @phba: pointer to lpfc hba data structure.
*
* This routine clears the FCF record index from the eligible bmask for
- * round robin failover search. It checks to make sure that the index
+ * roundrobin failover search. It checks to make sure that the index
* does not go beyond the range of the driver allocated bmask dimension
* before clearing the bit.
**/
@@ -12469,9 +12563,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
- "2762 HBA FCF index goes beyond driver's "
- "book keeping dimension: fcf_index:%d, "
- "driver_bmask_max:%d\n",
+ "2762 FCF (x%x) reached driver's book "
+ "keeping dimension:x%x\n",
fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
return;
}
@@ -12479,7 +12572,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2791 Clear FCF index x%x from round robin failover "
+ "2791 Clear FCF (x%x) from roundrobin failover "
"bmask\n", fcf_index);
}
@@ -12530,8 +12623,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
- "2775 Start FCF rediscovery quiescent period "
- "wait timer before scaning FCF table\n");
+ "2775 Start FCF rediscover quiescent timer\n");
/*
* Start FCF rediscovery wait timer for pending FCF
 * before rescanning the FCF record table.
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a0ca572ec28b..c4483feb8b71 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -19,10 +19,16 @@
*******************************************************************/
#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
+#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
+#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
+#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
#define LPFC_GET_QE_REL_INT 32
#define LPFC_RPI_LOW_WATER_MARK 10
+#define LPFC_UNREG_FCF 1
+#define LPFC_SKIP_UNREG_FCF 0
+
 /* Amount of time in msec for waiting FCF rediscovery to complete */
#define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
@@ -163,9 +169,8 @@ struct lpfc_fcf {
#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
-#define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */
+#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
uint32_t addr_mode;
- uint16_t fcf_rr_init_indx;
uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f93120e4c796..7a1b5b112a0b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.17"
+#define LPFC_DRIVER_VERSION "8.3.18"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"