Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 345
1 file changed, 283 insertions(+), 62 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7a61455140b6..e758eae0d0fd 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -455,6 +455,11 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
struct lpfc_iocbq * iocbq = NULL;
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
+
+ if (iocbq)
+ phba->iocb_cnt++;
+ if (phba->iocb_cnt > phba->iocb_max)
+ phba->iocb_max = phba->iocb_cnt;
return iocbq;
}
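The hunk above is a standard free-list accounting pattern: count only successful allocations and track a high-water mark for diagnostics. A minimal standalone sketch of the same idea, using hypothetical names rather than the lpfc API:

struct obj { struct obj *next; };

struct pool {
    struct obj *free_list;
    unsigned int cnt;   /* objects currently allocated */
    unsigned int max;   /* high-water mark of cnt */
};

static struct obj *pool_get(struct pool *p)
{
    struct obj *o = p->free_list;

    if (o) {
        p->free_list = o->next;
        p->cnt++;                /* count only successful allocations */
        if (p->cnt > p->max)
            p->max = p->cnt;     /* record the peak for diagnostics */
    }
    return o;
}

The matching release path simply decrements cnt, which is exactly what the later hunk in __lpfc_sli_release_iocbq() does.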
@@ -575,7 +580,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
struct lpfc_sglq *sglq;
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
- unsigned long iflag;
+ unsigned long iflag = 0;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
@@ -593,6 +599,10 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
} else {
sglq->state = SGL_FREED;
list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+
+ /* Check if the TX queue needs to be serviced */
+ if (pring->txq_cnt)
+ lpfc_worker_wake_up(phba);
}
}
@@ -605,6 +615,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
+
/**
* __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
* @phba: Pointer to HBA context object.
@@ -642,6 +653,7 @@ static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
phba->__lpfc_sli_release_iocbq(phba, iocbq);
+ phba->iocb_cnt--;
}
/**
@@ -872,7 +884,11 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
list_add_tail(&piocb->list, &pring->txcmplq);
+ piocb->iocb_flag |= LPFC_IO_ON_Q;
pring->txcmplq_cnt++;
+ if (pring->txcmplq_cnt > pring->txcmplq_max)
+ pring->txcmplq_max = pring->txcmplq_cnt;
+
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
@@ -897,7 +913,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* the txq, the function returns the first iocb in the list after
* removing it from the list; otherwise it returns NULL.
**/
-static struct lpfc_iocbq *
+struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_iocbq *cmd_iocb;
@@ -2150,7 +2166,10 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
- pring->txcmplq_cnt--;
+ if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+ pring->txcmplq_cnt--;
+ cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+ }
return cmd_iocb;
}
@@ -2183,7 +2202,10 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
- pring->txcmplq_cnt--;
+ if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
+ cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+ pring->txcmplq_cnt--;
+ }
return cmd_iocb;
}
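Both lookup routines now apply the same guard: the txcmplq counter is tied to a per-iocb flag so that removing the same iocb twice cannot drive the counter negative. A minimal sketch of the idea, with hypothetical names:

#define ON_QUEUE 0x1

struct entry {
    unsigned int flags;
};

static void queue_del(struct entry *e, unsigned int *q_cnt)
{
    if (e->flags & ON_QUEUE) {
        e->flags &= ~ON_QUEUE;  /* clear first: a repeat becomes a no-op */
        (*q_cnt)--;             /* decrement exactly once per entry */
    }
}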
@@ -3564,13 +3586,16 @@ static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
-
+ uint32_t hba_aer_enabled;
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0296 Restart HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
+ /* Save the PCIe device's Advanced Error Reporting (AER) state */
+ hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
+
lpfc_sli4_brdreset(phba);
spin_lock_irq(&phba->hbalock);
@@ -3582,6 +3607,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = get_seconds();
+ /* Reset HBA AER if it was enabled, note hba_flag was reset above */
+ if (hba_aer_enabled)
+ pci_disable_pcie_error_reporting(phba->pcidev);
+
lpfc_hba_down_post(phba);
return 0;
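The AER handling here snapshots one bit of state before a reset that wipes the whole flag word, then reconciles against the saved copy. A condensed sketch of the pattern, assuming a hypothetical flag bit:

#define FLAG_AER_ENABLED 0x1

static void reset_preserving_aer(unsigned int *hba_flag)
{
    unsigned int aer_was_on = *hba_flag & FLAG_AER_ENABLED;

    *hba_flag = 0;          /* the board reset clears every flag */

    if (aer_was_on) {
        /* AER was active before the reset; tear it down here so a
         * later setup pass can re-enable it against the fresh state */
    }
}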
@@ -3794,7 +3823,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
phba->link_state = LPFC_HBA_ERROR;
mempool_free(pmb, phba->mbox_mem_pool);
- return ENXIO;
+ return -ENXIO;
}
}
phba->hbq_count = hbq_count;
@@ -3885,7 +3914,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
LPFC_SLI3_HBQ_ENABLED |
LPFC_SLI3_CRP_ENABLED |
- LPFC_SLI3_INB_ENABLED |
LPFC_SLI3_BG_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3927,20 +3955,9 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
- if (pmb->u.mb.un.varCfgPort.ginb) {
- phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
- phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
- phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
- phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
- phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
- phba->inb_last_counter =
- phba->mbox->us.s3_inb_pgp.counter;
- } else {
- phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
- phba->port_gp = phba->mbox->us.s3_pgp.port;
- phba->inb_ha_copy = NULL;
- phba->inb_counter = NULL;
- }
+
+ phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
+ phba->port_gp = phba->mbox->us.s3_pgp.port;
if (phba->cfg_enable_bg) {
if (pmb->u.mb.un.varCfgPort.gbg)
@@ -3953,8 +3970,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
} else {
phba->hbq_get = NULL;
phba->port_gp = phba->mbox->us.s2.port;
- phba->inb_ha_copy = NULL;
- phba->inb_counter = NULL;
phba->max_vpi = 0;
}
do_prep_failed:
@@ -4214,7 +4229,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
*vpd_size = mqe->un.read_rev.avail_vpd_len;
- lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
+ memcpy(vpd, dmabuf->virt, *vpd_size);
+
dma_free_coherent(&phba->pcidev->dev, dma_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
@@ -4539,6 +4555,24 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+ /* Enable PCIe device Advanced Error Reporting (AER) if configured */
+ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
+ rc = pci_enable_pcie_error_reporting(phba->pcidev);
+ if (!rc) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2829 This device supports "
+ "Advanced Error Reporting (AER)\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= HBA_AER_ENABLED;
+ spin_unlock_irq(&phba->hbalock);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2830 This device does not support "
+ "Advanced Error Reporting (AER)\n");
+ phba->cfg_aer_support = 0;
+ }
+ }
+
/*
* The port is ready, set the host's link state to LINK_DOWN
* in preparation for link interrupts.
@@ -5265,7 +5299,8 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
rc = MBXERR_ERROR;
- }
+ } else
+ lpfc_sli4_swap_str(phba, mboxq);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
@@ -5592,7 +5627,7 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
* iocb to the txq when SLI layer cannot submit the command iocb
* to the ring.
**/
-static void
+void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
@@ -6209,7 +6244,6 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sglq *sglq;
- uint16_t xritag;
union lpfc_wqe wqe;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
@@ -6218,10 +6252,26 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
+ if (pring->txq_cnt) {
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ __lpfc_sli_ringtx_put(phba,
+ pring, piocb);
+ return IOCB_SUCCESS;
+ } else {
+ return IOCB_BUSY;
+ }
+ } else {
sglq = __lpfc_sli_get_sglq(phba);
- if (!sglq)
- return IOCB_ERROR;
- piocb->sli4_xritag = sglq->sli4_xritag;
+ if (!sglq) {
+ if (!(flag & SLI_IOCB_RET_IOCB)) {
+ __lpfc_sli_ringtx_put(phba,
+ pring,
+ piocb);
+ return IOCB_SUCCESS;
+ } else
+ return IOCB_BUSY;
+ }
+ }
}
} else if (piocb->iocb_flag & LPFC_IO_FCP) {
sglq = NULL; /* These IO's already have an XRI and
@@ -6237,8 +6287,9 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
if (sglq) {
- xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
- if (xritag != sglq->sli4_xritag)
+ piocb->sli4_xritag = sglq->sli4_xritag;
+
+ if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
return IOCB_ERROR;
}
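The reworked path prefers deferral over failure: with no SGL available (or with the txq already backed up), the iocb is parked on the txq unless the caller set SLI_IOCB_RET_IOCB, in which case ownership is handed back with IOCB_BUSY. A condensed sketch of that decision, with hypothetical names:

enum { IO_SUCCESS, IO_BUSY };
#define RET_ON_BUSY 0x1             /* analogous to SLI_IOCB_RET_IOCB */

static int submit_or_defer(int have_sgl, unsigned int flag,
                           void (*enqueue_txq)(void))
{
    if (have_sgl)
        return IO_SUCCESS;          /* normal submission path */
    if (flag & RET_ON_BUSY)
        return IO_BUSY;             /* caller keeps the request */
    enqueue_txq();                  /* park it; lpfc_drain_txq() retries */
    return IO_SUCCESS;              /* ownership moved to the txq */
}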
@@ -6278,7 +6329,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
* IOCB_SUCCESS - Success
* IOCB_BUSY - Busy
**/
-static inline int
+int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
@@ -7095,13 +7146,6 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
abort_iocb = phba->sli.iocbq_lookup[abort_context];
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
- "0327 Cannot abort els iocb %p "
- "with tag %x context %x, abort status %x, "
- "abort code %x\n",
- abort_iocb, abort_iotag, abort_context,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
-
/*
* If the iocb is not found in Firmware queue the iocb
* might have completed already. Do not free it again.
@@ -7120,6 +7164,13 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
abort_context = abort_iocb->iocb.ulpContext;
}
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
+ "0327 Cannot abort els iocb %p "
+ "with tag %x context %x, abort status %x, "
+ "abort code %x\n",
+ abort_iocb, abort_iotag, abort_context,
+ irsp->ulpStatus, irsp->un.ulpWord[4]);
/*
* make sure we have the right iocbq before taking it
* off the txcmplq and try to call completion routine.
@@ -7137,7 +7188,10 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* following abort XRI from the HBA.
*/
list_del_init(&abort_iocb->list);
- pring->txcmplq_cnt--;
+ if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
+ abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
+ pring->txcmplq_cnt--;
+ }
/* Firmware could still be in progress of DMAing
* payload, so don't free data buffer till after
@@ -7269,8 +7323,9 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0339 Abort xri x%x, original iotag x%x, "
"abort cmd iotag x%x\n",
+ iabt->un.acxri.abortIoTag,
iabt->un.acxri.abortContextTag,
- iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
+ abtsiocbp->iotag);
retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
if (retval)
@@ -7600,7 +7655,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
long timeleft, timeout_req = 0;
int retval = IOCB_SUCCESS;
uint32_t creg_val;
-
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/*
* If the caller has provided a response iocbq buffer, then context2
* is NULL or it's an error.
@@ -7622,7 +7677,8 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
readl(phba->HCregaddr); /* flush */
}
- retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
+ retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
+ SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
timeleft = wait_event_timeout(done_q,
@@ -7644,6 +7700,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
timeout, (timeleft / jiffies));
retval = IOCB_TIMEDOUT;
}
+ } else if (retval == IOCB_BUSY) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+ phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+ return retval;
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0332 IOCB wait issue failed, Data x%x\n",
@@ -7724,9 +7785,10 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
* if LPFC_MBX_WAKE flag is set the mailbox is completed
* else do not free the resources.
*/
- if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
+ if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
retval = MBX_SUCCESS;
- else {
+ lpfc_sli4_swap_str(phba, pmboxq);
+ } else {
retval = MBX_TIMEOUT;
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
@@ -8789,12 +8851,17 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
{
struct lpfc_iocbq *irspiocbq;
unsigned long iflags;
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
/* Get an irspiocbq for later ELS response processing use */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0387 Failed to allocate an iocbq\n");
+ "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
+ "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
+ pring->txq_cnt, phba->iocb_cnt,
+ phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
+ phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
return false;
}
@@ -9043,9 +9110,10 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
}
}
if (unlikely(!cq)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0365 Slow-path CQ identifier (%d) does "
- "not exist\n", cqid);
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0365 Slow-path CQ identifier "
+ "(%d) does not exist\n", cqid);
return;
}
@@ -9208,6 +9276,7 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
{
struct lpfc_wcqe_release wcqe;
bool workposted = false;
+ unsigned long iflag;
/* Copy the work queue CQE and convert endian order if needed */
lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
@@ -9216,6 +9285,9 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
case CQE_CODE_COMPL_WQE:
/* Process the WQ complete event */
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ phba->last_completion_time = jiffies;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_sli4_fp_handle_fcp_wcqe(phba,
(struct lpfc_wcqe_complete *)&wcqe);
break;
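Stamping jiffies on every fast-path completion gives the driver's heartbeat logic a cheap liveness signal. A minimal sketch of the same idea, with hypothetical parameters:

static void note_completion(spinlock_t *lock, unsigned long *last_seen)
{
    unsigned long flags;

    spin_lock_irqsave(lock, flags);
    *last_seen = jiffies;   /* a watchdog later compares this to "now" */
    spin_unlock_irqrestore(lock, flags);
}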
@@ -9271,9 +9343,10 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
if (unlikely(!cq)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0367 Fast-path completion queue does not "
- "exist\n");
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0367 Fast-path completion queue "
+ "does not exist\n");
return;
}
@@ -11898,12 +11971,26 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
* available rpis maintained by the driver.
**/
void
+__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+ if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
+ phba->sli4_hba.rpi_count--;
+ phba->sli4_hba.max_cfg_param.rpi_used--;
+ }
+}
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
spin_lock_irq(&phba->hbalock);
- clear_bit(rpi, phba->sli4_hba.rpi_bmask);
- phba->sli4_hba.rpi_count--;
- phba->sli4_hba.max_cfg_param.rpi_used--;
+ __lpfc_sli4_free_rpi(phba, rpi);
spin_unlock_irq(&phba->hbalock);
}
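The split above follows the usual kernel convention: the double-underscore helper assumes the caller already holds the lock, while the plain-named wrapper takes it. That lets lpfc_cleanup_pending_mbox(), which already holds hbalock, free rpis without deadlocking. A sketch of the pairing, with hypothetical resource names:

static void __res_free(unsigned long *bmask, unsigned int *count, int id)
{
    /* caller holds the lock; test_and_clear_bit() makes a double
     * free harmless */
    if (test_and_clear_bit(id, bmask))
        (*count)--;
}

static void res_free(spinlock_t *lock, unsigned long *bmask,
                     unsigned int *count, int id)
{
    spin_lock_irq(lock);
    __res_free(bmask, count, id);
    spin_unlock_irq(lock);
}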
@@ -12318,18 +12405,47 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
uint16_t next_fcf_index;
- /* Search from the currently registered FCF index */
+ /* Start the search from the bit after the currently registered FCF index */
+ next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
+ LPFC_SLI4_FCF_TBL_INDX_MAX;
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX,
- phba->fcf.current_rec.fcf_indx);
+ next_fcf_index);
+
/* Wrap around condition on phba->fcf.fcf_rr_bmask */
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
- /* Round robin failover stop condition */
- if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
+
+ /* Check roundrobin failover list empty condition */
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2844 No roundrobin failover FCF available\n");
return LPFC_FCOE_FCF_NEXT_NONE;
+ }
+
+ /* Check roundrobin failover index bmask stop condition */
+ if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
+ if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "2847 Round robin failover FCF index "
+ "search hit stop condition:x%x\n",
+ next_fcf_index);
+ return LPFC_FCOE_FCF_NEXT_NONE;
+ }
+ /* The roundrobin failover index bmask was updated; start over */
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2848 Round robin failover FCF index bmask "
+ "updated, start over\n");
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
+ spin_unlock_irq(&phba->hbalock);
+ return phba->fcf.fcf_rr_init_indx;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2845 Get next round robin failover "
+ "FCF index x%x\n", next_fcf_index);
return next_fcf_index;
}
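The rewritten search is a circular scan over the FCF bitmask: start one bit past the current index, wrap to bit 0 on overflow, and treat a second overflow as an empty mask. A standalone sketch, assuming a hypothetical table size:

#define TBL_MAX 64

static int rr_next(const unsigned long *bmask, int current_idx)
{
    int next = find_next_bit(bmask, TBL_MAX, (current_idx + 1) % TBL_MAX);

    if (next >= TBL_MAX)            /* wrapped off the end */
        next = find_next_bit(bmask, TBL_MAX, 0);
    if (next >= TBL_MAX)            /* still nothing set: mask is empty */
        return -1;
    return next;
}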
@@ -12359,11 +12475,20 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
/* Set the eligible FCF record index bmask */
set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+ /* Set the roundrobin index bmask updated */
+ spin_lock_irq(&phba->hbalock);
+ phba->fcf.fcf_flag |= FCF_REDISC_RRU;
+ spin_unlock_irq(&phba->hbalock);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2790 Set FCF index x%x to round robin failover "
+ "bmask\n", fcf_index);
+
return 0;
}
/**
- * lpfc_sli4_fcf_rr_index_set - Clear bmask from eligible fcf record index
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
* @phba: pointer to lpfc hba data structure.
*
* This routine clears the FCF record index from the eligible bmask for
@@ -12384,6 +12509,10 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
}
/* Clear the eligible FCF record index bmask */
clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "2791 Clear FCF index x%x from round robin failover "
+ "bmask\n", fcf_index);
}
/**
@@ -12446,7 +12575,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
}
/**
- * lpfc_sli4_redisc_all_fcf - Request to rediscover entire FCF table by port.
+ * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to request rediscovery of the entire FCF table
@@ -12662,6 +12791,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
LPFC_MBOXQ_t *mb, *nextmb;
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12673,6 +12803,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
continue;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ __lpfc_sli4_free_rpi(phba,
+ mb->u.mb.un.varRegLogin.rpi);
mp = (struct lpfc_dmabuf *) (mb->context1);
if (mp) {
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -12680,6 +12813,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
}
ndlp = (struct lpfc_nodelist *) mb->context2;
if (ndlp) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock_irq(shost->host_lock);
lpfc_nlp_put(ndlp);
mb->context2 = NULL;
}
@@ -12695,6 +12831,9 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
ndlp = (struct lpfc_nodelist *) mb->context2;
if (ndlp) {
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
+ spin_unlock_irq(shost->host_lock);
lpfc_nlp_put(ndlp);
mb->context2 = NULL;
}
@@ -12705,3 +12844,85 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
}
+/**
+ * lpfc_drain_txq - Drain the txq
+ * @phba: Pointer to HBA context object.
+ *
+ * This function attempts to submit IOCBs on the txq
+ * to the adapter. For SLI4 adapters, the txq contains
+ * ELS IOCBs that have been deferred because there
+ * were no SGLs available. This congestion can occur with large
+ * vport counts during node discovery.
+ **/
+
+uint32_t
+lpfc_drain_txq(struct lpfc_hba *phba)
+{
+ LIST_HEAD(completions);
+ struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_iocbq *piocbq = NULL;
+ unsigned long iflags = 0;
+ char *fail_msg = NULL;
+ struct lpfc_sglq *sglq;
+ union lpfc_wqe wqe;
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (pring->txq_cnt > pring->txq_max)
+ pring->txq_max = pring->txq_cnt;
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ while (pring->txq_cnt) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+
+ sglq = __lpfc_sli_get_sglq(phba);
+ if (!sglq) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ break;
+ } else {
+ piocbq = lpfc_sli_ringtx_get(phba, pring);
+ if (!piocbq) {
+ /* The txq_cnt is out of sync. This should
+ * never happen.
+ */
+ sglq = __lpfc_clear_active_sglq(phba,
+ sglq->sli4_xritag);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2823 txq empty and txq_cnt is %d\n ",
+ pring->txq_cnt);
+ break;
+ }
+ }
+
+ /* The xri and iocb resources are secured,
+ * attempt to issue the request
+ */
+ piocbq->sli4_xritag = sglq->sli4_xritag;
+ if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
+ fail_msg = "to convert bpl to sgl";
+ else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+ fail_msg = "to convert iocb to wqe";
+ else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+ fail_msg = " - Wq is full";
+ else
+ lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+
+ if (fail_msg) {
+ /* Failed means we can't issue and need to cancel */
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2822 IOCB failed %s iotag 0x%x "
+ "xri 0x%x\n",
+ fail_msg,
+ piocbq->iotag, piocbq->sli4_xritag);
+ list_add_tail(&piocbq->list, &completions);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+
+ /* Cancel all the IOCBs that cannot be issued */
+ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+
+ return pring->txq_cnt;
+}
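One plausible way the pieces fit together, sketched as a hypothetical worker body (the actual wake-up comes from lpfc_worker_wake_up() in the release hunk near the top of this diff):

static void worker_service_txq(struct lpfc_hba *phba)
{
    /* try to move deferred ELS IOCBs to the adapter now that an SGL
     * may have been freed */
    uint32_t remaining = lpfc_drain_txq(phba);

    /* nonzero means we starved on SGLs again; the next
     * __lpfc_sli_release_iocbq_s4() call will wake the worker to retry */
    (void)remaining;
}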