author    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-21 10:50:15 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-09-21 10:50:15 -0700
commit    10fd71780f7d155f4e35fecfad0ebd4a725a244b (patch)
tree      b88976120fd8f620669ed239842ea26ecc2c5e52 /drivers/scsi/lpfc
parent    Merge tag 'for-5.4/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm (diff)
parent    scsi: hisi_sas: Fix the conflict between device gone and host reset (diff)
download  linux-dev-10fd71780f7d155f4e35fecfad0ebd4a725a244b.tar.xz
          linux-dev-10fd71780f7d155f4e35fecfad0ebd4a725a244b.zip
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: qla2xxx, ufs, smartpqi, lpfc,
  hisi_sas, qedf, mpt3sas; plus a whole load of minor updates. The only core
  change this time around is the addition of request batching for virtio.
  Since batching requires an additional flag to use, it should be invisible
  to the rest of the drivers"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (264 commits)
  scsi: hisi_sas: Fix the conflict between device gone and host reset
  scsi: hisi_sas: Add BIST support for phy loopback
  scsi: hisi_sas: Add hisi_sas_debugfs_alloc() to centralise allocation
  scsi: hisi_sas: Remove some unused function arguments
  scsi: hisi_sas: Remove redundant work declaration
  scsi: hisi_sas: Remove hisi_sas_hw.slot_complete
  scsi: hisi_sas: Assign NCQ tag for all NCQ commands
  scsi: hisi_sas: Update all the registers after suspend and resume
  scsi: hisi_sas: Retry 3 times TMF IO for SAS disks when init device
  scsi: hisi_sas: Remove sleep after issue phy reset if sas_smp_phy_control() fails
  scsi: hisi_sas: Directly return when running I_T_nexus reset if phy disabled
  scsi: hisi_sas: Use true/false as input parameter of sas_phy_reset()
  scsi: hisi_sas: add debugfs auto-trigger for internal abort time out
  scsi: virtio_scsi: unplug LUNs when events missed
  scsi: scsi_dh_rdac: zero cdb in send_mode_select()
  scsi: fcoe: fix null-ptr-deref Read in fc_release_transport
  scsi: ufs-hisi: use devm_platform_ioremap_resource() to simplify code
  scsi: ufshcd: use devm_platform_ioremap_resource() to simplify code
  scsi: hisi_sas: use devm_platform_ioremap_resource() to simplify code
  scsi: ufs: Use kmemdup in ufshcd_read_string_desc()
  ...
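A note on the one core change called out above: request batching lets a low-level driver post several commands to its hardware queue and ring the doorbell once for the whole batch, and only drivers that opt in ever see it. The sketch below is illustrative only, not the verbatim SCSI core interface from this merge; the member and flag names used here (commit_rqs, SCMD_LAST) and every example_* helper are assumptions, so consult the actual scsi-misc commits for the real definitions.

/*
 * Minimal sketch of the opt-in batching pattern (hypothetical names).
 */
static void example_commit_rqs(struct Scsi_Host *shost, u16 hwq)
{
	/* Called once per batch: kick the hardware a single time. */
	example_ring_doorbell(shost, hwq);
}

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	example_post_to_ring(shost, cmd);	/* queue, but do not kick yet */
	if (cmd->flags & SCMD_LAST)		/* last command of this batch */
		example_ring_doorbell(shost, example_cmd_to_hwq(cmd));
	return 0;
}

static struct scsi_host_template example_sht = {
	.queuecommand	= example_queuecommand,
	.commit_rqs	= example_commit_rqs,	/* the opt-in hook */
	/* Drivers that never set this keep the old kick-per-command path. */
};

Drivers that do not opt in behave exactly as before, which is what "invisible to the rest of the drivers" means here.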
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h           |  11
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      |  76
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c       |  29
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |  13
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c        |  68
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   | 228
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h   |  61
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h      |   3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       | 116
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   | 181
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h        |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h       |  34
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      | 926
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c       |  65
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c |  43
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c      | 389
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c     |  28
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      | 591
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       | 533
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h       |  11
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h      |  50
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h   |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c     |   8
23 files changed, 1974 insertions, 1498 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index bade2e025ecf..691acbdcc46d 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -51,6 +51,8 @@ struct lpfc_sli2_slim;
cmnd for menlo needs nearly twice as for firmware
downloads using bsg */
+#define LPFC_DEFAULT_XPSGL_SIZE 256
+#define LPFC_MAX_SG_TABLESIZE 0xffff
#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
@@ -732,14 +734,13 @@ struct lpfc_hba {
#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
-#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
+#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
* capability
*/
-#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
@@ -795,10 +796,12 @@ struct lpfc_hba {
uint8_t mds_diags_support;
uint8_t bbcredit_support;
uint8_t enab_exp_wqcq_pages;
+ u8 nsler; /* Firmware supports FC-NVMe-2 SLER */
/* HBA Config Parameters */
uint32_t cfg_ack0;
uint32_t cfg_xri_rebalancing;
+ uint32_t cfg_xpsgl;
uint32_t cfg_enable_npiv;
uint32_t cfg_enable_rrq;
uint32_t cfg_topology;
@@ -905,6 +908,7 @@ struct lpfc_hba {
wait_queue_head_t work_waitq;
struct task_struct *worker_thread;
unsigned long data_flags;
+ uint32_t border_sge_num;
uint32_t hbq_in_use; /* HBQs in use flag */
uint32_t hbq_count; /* Count of configured HBQs */
@@ -987,6 +991,7 @@ struct lpfc_hba {
struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
struct dma_pool *txrdy_payload_pool;
+ struct dma_pool *lpfc_cmd_rsp_buf_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@@ -1034,8 +1039,6 @@ struct lpfc_hba {
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpHostSlim;
struct dentry *debug_dumpHBASlim;
- struct dentry *debug_dumpData; /* BlockGuard BPL */
- struct dentry *debug_dumpDif; /* BlockGuard BPL */
struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d65558619ab0..25aa7a53d255 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -841,7 +841,8 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
lpfc_vpd_t *vp = &phba->vpd;
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
- return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
+ return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
+ vp->rev.smRev, vp->rev.smFwRev);
}
/**
@@ -3682,8 +3683,8 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
if (rport)
remoteport = rport->remoteport;
spin_unlock(&vport->phba->hbalock);
- if (remoteport)
- nvme_fc_set_remoteport_devloss(rport->remoteport,
+ if (rport && remoteport)
+ nvme_fc_set_remoteport_devloss(remoteport,
vport->cfg_devloss_tmo);
#endif
}
@@ -5467,15 +5468,12 @@ LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
* lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
* For the Initiator (I), enabling this parameter means that an NVMET
* PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
- * processed by the initiator for subsequent NVME FCP IO. For the target
- * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
- * driver parameter as the target function's first burst size returned to the
- * initiator in the target's NVME PRLI response. Parameter supported on physical
- * port only - no NPIV support.
+ * processed by the initiator for subsequent NVME FCP IO.
+ * Currently, this feature is not supported on the NVME target
* Value range is [0,1]. Default value is 0 (disabled).
*/
LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
- "Enable First Burst feature on I and T functions.");
+ "Enable First Burst feature for NVME Initiator.");
/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
@@ -5927,7 +5925,7 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
* 1 = MDS Diagnostics enabled
* Value range is [0,1]. Default value is 0.
*/
-LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
/*
* lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
@@ -6859,10 +6857,31 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
static void
lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
+ struct lpfc_rport_data *rdata = rport->dd_data;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_nvme_rport *nrport = NULL;
+#endif
+
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
+
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ dev_info(&rport->dev, "Cannot find remote node to "
+ "set rport dev loss tmo, port_id x%x\n",
+ rport->port_id);
+ return;
+ }
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ nrport = lpfc_ndlp_get_nrport(ndlp);
+
+ if (nrport && nrport->remoteport)
+ nvme_fc_set_remoteport_devloss(nrport->remoteport,
+ rport->dev_loss_tmo);
+#endif
}
/**
@@ -7059,6 +7078,21 @@ struct fc_function_template lpfc_vport_transport_functions = {
};
/**
+ * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
+ * Mode
+ * @phba: lpfc_hba pointer.
+ **/
+static void
+lpfc_get_hba_function_mode(struct lpfc_hba *phba)
+{
+ /* If it's a SkyHawk FCoE adapter */
+ if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+ phba->hba_flag |= HBA_FCOE_MODE;
+ else
+ phba->hba_flag &= ~HBA_FCOE_MODE;
+}
+
+/**
* lpfc_get_cfgparam - Used during probe_one to init the adapter structure
* @phba: lpfc_hba pointer.
**/
@@ -7114,8 +7148,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
else
phba->cfg_poll = lpfc_poll;
- if (phba->cfg_enable_bg)
+ /* Get the function mode */
+ lpfc_get_hba_function_mode(phba);
+
+ /* BlockGuard allowed for FC only. */
+ if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0581 BlockGuard feature not supported\n");
+ /* If set, clear the BlockGuard support param */
+ phba->cfg_enable_bg = 0;
+ } else if (phba->cfg_enable_bg) {
phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+ }
lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
@@ -7175,16 +7219,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
-
- /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
- * accommodate 512K and 1M IOs in a single nvme buf and supply
- * enough NVME LS iocb buffers for larger connectivity counts.
- */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- phba->cfg_iocb_cnt = 5;
- }
-
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b7216d694bff..39a736b887b1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1040,7 +1040,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (!dmabuf) {
lpfc_printf_log(phba, KERN_ERR,
LOG_LIBDFC, "2616 No dmabuf "
- "found for iocbq 0x%p\n",
+ "found for iocbq x%px\n",
iocbq);
kfree(evt_dat->data);
kfree(evt_dat);
@@ -1276,9 +1276,7 @@ lpfc_bsg_hba_set_event(struct bsg_job *job)
return 0; /* call job done later */
job_error:
- if (dd_data != NULL)
- kfree(dd_data);
-
+ kfree(dd_data);
job->dd_data = NULL;
return rc;
}
@@ -1571,7 +1569,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
- ctiocb->iocb_cmpl = NULL;
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
ctiocb->context1 = dd_data;
@@ -5451,7 +5448,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5530,8 +5529,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5591,7 +5591,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5673,7 +5675,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
ras_job_error:
bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5744,8 +5748,9 @@ lpfc_get_trunk_info(struct bsg_job *job)
phba->sli4_hba.link_state.logical_speed / 1000;
job_error:
bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 68e9f96242d3..b2ad8c750486 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -326,7 +326,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
-void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -433,16 +433,6 @@ int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
uint16_t *, uint16_t *);
-/* externs BlockGuard */
-extern char *_dump_buf_data;
-extern unsigned long _dump_buf_data_order;
-extern char *_dump_buf_dif;
-extern unsigned long _dump_buf_dif_order;
-extern spinlock_t _dump_buf_lock;
-extern int _dump_buf_done;
-extern spinlock_t pgcnt_lock;
-extern unsigned int pgcnt;
-
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
void lpfc_fabric_abort_hba(struct lpfc_hba *);
@@ -595,6 +585,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
+void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index ec72c39997d2..25e86706e207 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -462,6 +462,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
+ (fc4_type == FC_TYPE_FCP) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);
@@ -480,10 +481,20 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0238 Process x%06x NameServer Rsp "
- "Data: x%x x%x x%x x%x\n", Did,
+ "Data: x%x x%x x%x x%x x%x\n", Did,
ndlp->nlp_flag, ndlp->nlp_fc4_type,
- vport->fc_flag,
+ ndlp->nlp_state, vport->fc_flag,
vport->fc_rscn_id_cnt);
+
+ /* if ndlp needs to be discovered and prior
+ * state of ndlp hit devloss, change state to
+ * allow rediscovery.
+ */
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
@@ -491,9 +502,9 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0239 Skip x%06x NameServer Rsp "
- "Data: x%x x%x\n", Did,
- vport->fc_flag,
- vport->fc_rscn_id_cnt);
+ "Data: x%x x%x %p\n",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt, ndlp);
}
} else {
if (!(vport->fc_flag & FC_RSCN_MODE) ||
@@ -751,9 +762,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0208 NameServer Rsp Data: x%x x%x\n",
+ "0208 NameServer Rsp Data: x%x x%x "
+ "sz x%x\n",
vport->fc_flag,
- CTreq->un.gid.Fc4Type);
+ CTreq->un.gid.Fc4Type,
+ irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
outp,
@@ -814,6 +827,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
vport->gidft_inp--;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4216 GID_FT cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
/* Link up / RSCN discovery */
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
@@ -1209,14 +1227,34 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "3064 Setting ndlp %p, DID x%06x with "
- "FC4 x%08x, Data: x%08x x%08x\n",
+ "3064 Setting ndlp x%px, DID x%06x "
+ "with FC4 x%08x, Data: x%08x x%08x "
+ "%d\n",
ndlp, did, ndlp->nlp_fc4_type,
- FC_TYPE_FCP, FC_TYPE_NVME);
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
-
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ FC_TYPE_FCP, FC_TYPE_NVME,
+ ndlp->nlp_state);
+
+ if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
+ ndlp->nlp_fc4_type) {
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
+ } else if (!ndlp->nlp_fc4_type) {
+ /* If fc4 type is still unknown, then LOGO */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "6443 Sending LOGO ndlp x%px,"
+ "DID x%06x with fc4_type: "
+ "x%08x, state: %d\n",
+ ndlp, did, ndlp->nlp_fc4_type,
+ ndlp->nlp_state);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
}
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -2515,7 +2553,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
hsp = (struct serv_parm *)&vport->fc_sparam;
- ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+ ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
(uint32_t) hsp->cmn.bbRcvSizeLsb;
ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
size = FOURBYTES + sizeof(uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 1ee857d9d165..8d34be60d379 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -361,7 +361,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
len += scnprintf(buf+len, size-len,
- "Buf%d: %p %06x\n", i,
+ "Buf%d: x%px %06x\n", i,
hbq_buf->dbuf.virt, hbq_buf->tag);
found = 1;
break;
@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
- spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
- spin_lock(&qp->abts_nvme_buf_list_lock);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
spin_lock(&qp->io_buf_list_get_lock);
spin_lock(&qp->io_buf_list_put_lock);
out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp->abts_nvme_io_bufs, out);
spin_unlock(&qp->io_buf_list_put_lock);
spin_unlock(&qp->io_buf_list_get_lock);
- spin_unlock(&qp->abts_nvme_buf_list_lock);
- spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
lpfc_debugfs_last_xripool++;
if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
continue;
pbl_pool = &multixri_pool->pbl_pool;
pvt_pool = &multixri_pool->pvt_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
- if (qp->nvme_wq)
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
scnprintf(tmp, sizeof(tmp),
"%03d: %4d %4d %4d %4d | %10d %10d ",
@@ -2166,89 +2162,6 @@ out:
return rc;
}
-static int
-lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
-{
- struct lpfc_debug *debug;
- int rc = -ENOMEM;
-
- if (!_dump_buf_data)
- return -EBUSY;
-
- debug = kmalloc(sizeof(*debug), GFP_KERNEL);
- if (!debug)
- goto out;
-
- /* Round to page boundary */
- pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
- __func__, _dump_buf_data);
- debug->buffer = _dump_buf_data;
- if (!debug->buffer) {
- kfree(debug);
- goto out;
- }
-
- debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
- file->private_data = debug;
-
- rc = 0;
-out:
- return rc;
-}
-
-static int
-lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
-{
- struct lpfc_debug *debug;
- int rc = -ENOMEM;
-
- if (!_dump_buf_dif)
- return -EBUSY;
-
- debug = kmalloc(sizeof(*debug), GFP_KERNEL);
- if (!debug)
- goto out;
-
- /* Round to page boundary */
- pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
- __func__, _dump_buf_dif, file);
- debug->buffer = _dump_buf_dif;
- if (!debug->buffer) {
- kfree(debug);
- goto out;
- }
-
- debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
- file->private_data = debug;
-
- rc = 0;
-out:
- return rc;
-}
-
-static ssize_t
-lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
-{
- /*
- * The Data/DIF buffers only save one failing IO
- * The write op is used as a reset mechanism after an IO has
- * already been saved to the next one can be saved
- */
- spin_lock(&_dump_buf_lock);
-
- memset((void *)_dump_buf_data, 0,
- ((1 << PAGE_SHIFT) << _dump_buf_data_order));
- memset((void *)_dump_buf_dif, 0,
- ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
-
- _dump_buf_done = 0;
-
- spin_unlock(&_dump_buf_lock);
-
- return nbytes;
-}
-
static ssize_t
lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
@@ -2461,17 +2374,6 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
return 0;
}
-static int
-lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
-{
- struct lpfc_debug *debug = file->private_data;
-
- debug->buffer = NULL;
- kfree(debug);
-
- return 0;
-}
-
/**
* lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics
* @file: The file pointer to read from.
@@ -3786,23 +3688,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
int qidx;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
+ qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
- if (qp->assoc_qid != cq_id)
- continue;
- *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
- if (*len >= max_cnt)
- return 1;
- }
- }
return 0;
}
@@ -3868,9 +3760,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
struct lpfc_queue *qp;
int rc;
- qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
+ qp = phba->sli4_hba.hdwq[eqidx].io_cq;
- *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+ *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
@@ -3878,28 +3770,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
if (*len >= max_cnt)
return 1;
- rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+ rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
max_cnt, qp->queue_id);
if (rc)
return 1;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
-
- *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
-
- /* Reset max counter */
- qp->CQ_max_cqe = 0;
-
- if (*len >= max_cnt)
- return 1;
-
- rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
- max_cnt, qp->queue_id);
- if (rc)
- return 1;
- }
-
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
@@ -4348,7 +4223,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->cfg_hdw_queue;
qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
+ qp = phba->sli4_hba.hdwq[qidx].io_cq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -4360,22 +4235,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
}
- /* NVME complete queue */
- if (phba->sli4_hba.hdwq) {
- qidx = 0;
- do {
- qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
- if (qp && qp->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- qp, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = qp;
- goto pass_check;
- }
- } while (++qidx < phba->cfg_hdw_queue);
- }
goto error_out;
break;
case LPFC_IDIAG_MQ:
@@ -4419,20 +4278,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
/* FCP/SCSI work queue */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
- if (qp && qp->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- qp, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = qp;
- goto pass_check;
- }
- }
- /* NVME work queue */
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
+ qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -5508,26 +5354,6 @@ static const struct file_operations lpfc_debugfs_op_cpucheck = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_dumpData
-static const struct file_operations lpfc_debugfs_op_dumpData = {
- .owner = THIS_MODULE,
- .open = lpfc_debugfs_dumpData_open,
- .llseek = lpfc_debugfs_lseek,
- .read = lpfc_debugfs_read,
- .write = lpfc_debugfs_dumpDataDif_write,
- .release = lpfc_debugfs_dumpDataDif_release,
-};
-
-#undef lpfc_debugfs_op_dumpDif
-static const struct file_operations lpfc_debugfs_op_dumpDif = {
- .owner = THIS_MODULE,
- .open = lpfc_debugfs_dumpDif_open,
- .llseek = lpfc_debugfs_lseek,
- .read = lpfc_debugfs_read,
- .write = lpfc_debugfs_dumpDataDif_write,
- .release = lpfc_debugfs_dumpDataDif_release,
-};
-
#undef lpfc_debugfs_op_dif_err
static const struct file_operations lpfc_debugfs_op_dif_err = {
.owner = THIS_MODULE,
@@ -5924,20 +5750,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
} else
phba->debug_dumpHostSlim = NULL;
- /* Setup dumpData */
- snprintf(name, sizeof(name), "dumpData");
- phba->debug_dumpData =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpData);
-
- /* Setup dumpDif */
- snprintf(name, sizeof(name), "dumpDif");
- phba->debug_dumpDif =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpDif);
-
/* Setup DIF Error Injections */
snprintf(name, sizeof(name), "InjErrLBA");
phba->debug_InjErrLBA =
@@ -6315,12 +6127,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
phba->debug_dumpHostSlim = NULL;
- debugfs_remove(phba->debug_dumpData); /* dumpData */
- phba->debug_dumpData = NULL;
-
- debugfs_remove(phba->debug_dumpDif); /* dumpDif */
- phba->debug_dumpDif = NULL;
-
debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
phba->debug_InjErrLBA = NULL;
@@ -6442,12 +6248,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
- }
+ lpfc_debug_dump_wq(phba, DUMP_IO, idx);
lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba);
@@ -6459,12 +6260,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
- }
+ lpfc_debug_dump_cq(phba, DUMP_IO, idx);
/*
* Dump Event Queues (EQs)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 34070874616d..20f2537af511 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -291,8 +291,7 @@ struct lpfc_idiag {
#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
enum {
- DUMP_FCP,
- DUMP_NVME,
+ DUMP_IO,
DUMP_MBX,
DUMP_ELS,
DUMP_NVMELS,
@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
struct lpfc_queue *wq;
char *qtypestr;
- if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
- qtypestr = "FCP";
- } else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
- qtypestr = "NVME";
+ if (qtype == DUMP_IO) {
+ wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+ qtypestr = "IO";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
qtypestr = "MBX";
@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
} else
return;
- if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+ if (qtype == DUMP_IO)
pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
qtypestr, wqidx, wq->queue_id);
else
@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr;
int eqidx;
- /* fcp/nvme wq and cq are 1:1, thus same indexes */
+ /* io wq and cq are 1:1, thus same indexes */
eq = NULL;
- if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
- cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
- qtypestr = "FCP";
- } else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
- cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
- qtypestr = "NVME";
+ if (qtype == DUMP_IO) {
+ wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+ cq = phba->sli4_hba.hdwq[wqidx].io_cq;
+ qtypestr = "IO";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
cq = phba->sli4_hba.mbx_cq;
@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
eq = phba->sli4_hba.hdwq[0].hba_eq;
}
- if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+ if (qtype == DUMP_IO)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
int wq_idx;
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
- if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
+ if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
break;
if (wq_idx < phba->cfg_hdw_queue) {
- pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
- return;
- }
-
- for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
- if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
- break;
- if (wq_idx < phba->cfg_hdw_queue) {
- pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
+ pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
return;
}
@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
int cq_idx;
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
- if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
- break;
-
- if (cq_idx < phba->cfg_hdw_queue) {
- pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
- return;
- }
-
- for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
- if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
+ if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
break;
if (cq_idx < phba->cfg_hdw_queue) {
- pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
+ pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1c89c9f314fa..482e4a888dae 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -112,6 +112,8 @@ struct lpfc_nodelist {
uint8_t nlp_retry; /* used for ELS retries */
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
+ u8 nlp_nvme_info; /* NVME NSLER Support */
+#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
uint16_t nlp_usg_map; /* ndlp management usage bitmap */
#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
@@ -157,6 +159,7 @@ struct lpfc_node_rrq {
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
+#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */
#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */
#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f12780f4cfbb..d5303994bfd6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1052,17 +1052,18 @@ stop_rr_fcf_flogi:
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0150 FLOGI failure Status:x%x/x%x "
+ "xri x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ cmdiocb->sli4_xritag, irsp->ulpTimeout);
+
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout);
-
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1207,6 +1208,39 @@ out:
}
/**
+ * lpfc_cmpl_els_link_down - Completion callback function for ELS command
+ * aborted during a link down
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ */
+static void
+lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp;
+ uint32_t *pcmd;
+ uint32_t cmd;
+
+ pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+ cmd = *pcmd;
+ irsp = &rspiocb->iocb;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "6445 ELS completes after LINK_DOWN: "
+ " Status %x/%x cmd x%x flg x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
+ cmdiocb->iocb_flag);
+
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
+ cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ atomic_dec(&phba->fabric_iocb_count);
+ }
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
* lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
@@ -2107,7 +2141,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
!(vport->fc_flag & FC_OFFLINE_MODE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4110 Issue PLOGI x%x deferred "
- "on NPort x%x rpi x%x Data: %p\n",
+ "on NPort x%x rpi x%x Data: x%px\n",
ndlp->nlp_defer_did, ndlp->nlp_DID,
ndlp->nlp_rpi, ndlp);
@@ -2401,6 +2435,10 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr_nvme = (struct lpfc_nvme_prli *)pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
+ if (phba->nsler) {
+ bf_set(prli_nsler, npr_nvme, 1);
+ bf_set(prli_conf, npr_nvme, 1);
+ }
/* Only initiators request first burst. */
if ((phba->cfg_nvme_enable_fb) &&
@@ -4203,7 +4241,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+ "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5634,16 +5672,16 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
if (vport->fc_flag & FC_FABRIC) {
memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
- sizeof(desc->port_names.wwnn));
+ sizeof(desc->port_names.wwnn));
memcpy(desc->port_names.wwpn, &vport->fabric_portname,
- sizeof(desc->port_names.wwpn));
+ sizeof(desc->port_names.wwpn));
} else { /* Point to Point */
memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
- sizeof(desc->port_names.wwnn));
+ sizeof(desc->port_names.wwnn));
- memcpy(desc->port_names.wwnn, &ndlp->nlp_portname,
- sizeof(desc->port_names.wwpn));
+ memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
+ sizeof(desc->port_names.wwpn));
}
desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -6327,7 +6365,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
continue;
}
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+ /* Check to see if we need to NVME rescan this target
+ * remoteport.
+ */
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
lpfc_nvme_rescan_port(vport, ndlp);
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -6441,7 +6483,11 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*lp, vport->fc_flag, payload_len);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+ /* Check to see if we need to NVME rescan this target
+ * remoteport.
+ */
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
lpfc_nvme_rescan_port(vport, ndlp);
return 0;
}
@@ -7960,18 +8006,40 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ /* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
if (piocb->iocb_flag & LPFC_IO_LIBDFC)
continue;
if (piocb->vport != vport)
continue;
- list_add_tail(&piocb->dlist, &abort_list);
+
+ /* On the ELS ring we can have ELS_REQUESTs or
+ * GEN_REQUESTs waiting for a response.
+ */
+ cmd = &piocb->iocb;
+ if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ list_add_tail(&piocb->dlist, &abort_list);
+
+ /* If the link is down when flushing ELS commands
+ * the firmware will not complete them till after
+ * the link comes back up. This may confuse
+ * discovery for the new link up, so we need to
+ * change the compl routine to just clean up the iocb
+ * and avoid any retry logic.
+ */
+ if (phba->link_state == LPFC_LINK_DOWN)
+ piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+ }
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+ list_add_tail(&piocb->dlist, &abort_list);
}
+
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
- /* Abort each iocb on the aborted list and remove the dlist links. */
+
+ /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&piocb->dlist);
@@ -7987,6 +8055,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ /* No need to abort the txq list,
+ * just queue them up for lpfc_sli_cancel_iocbs
+ */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
cmd = &piocb->iocb;
@@ -8007,11 +8078,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_del_init(&piocb->list);
list_add_tail(&piocb->list, &abort_list);
}
+
+ /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
+ if (vport == phba->pport) {
+ list_for_each_entry_safe(piocb, tmp_iocb,
+ &phba->fabric_iocb_list, list) {
+ cmd = &piocb->iocb;
+ list_del_init(&piocb->list);
+ list_add_tail(&piocb->list, &abort_list);
+ }
+ }
+
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
- /* Cancell all the IOCBs from the completions list */
+ /* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 28ecaa7fc715..749286acdc17 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -118,6 +118,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_work_evt *evtp;
int put_node;
int put_rport;
+ unsigned long iflags;
rdata = rport->dd_data;
ndlp = rdata->pnode;
@@ -132,7 +133,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
+ "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
/* Don't defer this if we are in the process of deleting the vport
@@ -170,22 +171,22 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
}
shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
/* We need to hold the node by incrementing the reference
* count until this queued work is done
*/
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (evtp->evt_arg1) {
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
lpfc_worker_wake_up(phba);
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -212,14 +213,15 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
int put_node;
int warn_on = 0;
int fcf_inuse = 0;
+ unsigned long iflags;
rport = ndlp->rport;
vport = ndlp->vport;
shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
if (!rport)
return fcf_inuse;
@@ -235,7 +237,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
+ "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
/*
@@ -903,6 +905,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->trunk_link.link1.state = 0;
phba->trunk_link.link2.state = 0;
phba->trunk_link.link3.state = 0;
+ phba->sli4_hba.link_state.logical_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
}
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~FC_LBIT;
@@ -3115,8 +3119,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
int rc;
struct fcf_record *fcf_record;
uint32_t fc_flags = 0;
+ unsigned long iflags;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -3213,12 +3218,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
vport->fc_myDID = phba->fc_pref_DID;
fc_flags |= FC_LBIT;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
if (fc_flags) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
vport->fc_flag |= fc_flags;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
}
lpfc_linkup(phba);
@@ -3292,22 +3297,22 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
* The driver is expected to do FIP/FCF. Call the port
* and get the FCF Table.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->hba_flag & FCF_TS_INPROG) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
/* This is the initial FCF discovery scan */
phba->fcf.fcf_flag |= FCF_INIT_DISC;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2778 Start FCF table scan at linkup\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
/* Reset FCF roundrobin bmask for new discovery */
@@ -3318,7 +3323,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+ "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_issue_clear_la(phba, vport);
return;
@@ -3366,6 +3371,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
uint8_t attn_type;
+ unsigned long iflags;
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
@@ -3387,12 +3393,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->alpa_map[0], mp->virt, 128);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
if (bf_get(lpfc_mbx_read_top_pb, la))
vport->fc_flag |= FC_BYPASSED_MODE;
else
vport->fc_flag &= ~FC_BYPASSED_MODE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
@@ -3403,12 +3409,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->fc_eventTag = la->eventTag;
if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_mbx_read_top_mm, la))
phba->sli.sli_flag |= LPFC_MENLO_MAINT;
else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
phba->link_events++;
@@ -3529,7 +3535,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->ctx_ndlp = NULL;
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4041,7 +4047,7 @@ out:
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
+ "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4160,7 +4166,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 rport register x%06x, rport %p role x%x\n",
+ "3183 rport register x%06x, rport x%px role x%x\n",
ndlp->nlp_DID, rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
@@ -4184,7 +4190,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3184 rport unregister x%06x, rport %p\n",
+ "3184 rport unregister x%06x, rport x%px\n",
ndlp->nlp_DID, rport);
fc_remote_port_delete(rport);
@@ -4196,8 +4202,9 @@ static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
switch (state) {
case NLP_STE_UNUSED_NODE:
vport->fc_unused_cnt += count;
@@ -4227,7 +4234,7 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
vport->fc_npr_cnt += count;
break;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
}
static void
@@ -4480,9 +4487,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return NULL;
if (phba->sli_rev == LPFC_SLI_REV4) {
- rpi = lpfc_sli4_alloc_rpi(vport->phba);
- if (rpi == LPFC_RPI_ALLOC_ERROR)
+ if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
+ rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ else
+ rpi = ndlp->nlp_rpi;
+
+ if (rpi == LPFC_RPI_ALLOC_ERROR) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0359 %s: ndlp:x%px "
+ "usgmap:x%x refcnt:%d FAILED RPI "
+ " ALLOC\n",
+ __func__,
+ (void *)ndlp, ndlp->nlp_usg_map,
+ kref_read(&ndlp->kref));
return NULL;
+ }
}
spin_lock_irqsave(&phba->ndlp_lock, flags);
@@ -4490,9 +4509,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (NLP_CHK_FREE_REQ(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0277 lpfc_enable_node: ndlp:x%p "
+ "0277 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
goto free_rpi;
}
@@ -4500,9 +4519,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (NLP_CHK_NODE_ACT(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0278 lpfc_enable_node: ndlp:x%p "
+ "0278 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
goto free_rpi;
}
@@ -4532,7 +4551,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0008 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4541,6 +4560,14 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (state != NLP_STE_UNUSED_NODE)
lpfc_nlp_set_state(vport, ndlp, state);
+ else
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0013 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x x%px STATE=UNUSED\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ kref_read(&ndlp->kref),
+ ndlp->nlp_usg_map, ndlp);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node enable: did:x%x",
@@ -4797,7 +4824,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
(ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1434 UNREG cmpl deferred logo x%x "
- "on NPort x%x Data: x%x %p\n",
+ "on NPort x%x Data: x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did, ndlp);
@@ -4805,6 +4832,10 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ }
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
@@ -4843,7 +4874,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1436 unreg_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
- "Data: %p\n",
+ "Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp);
@@ -4893,7 +4924,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1433 unreg_rpi UNREG x%x on "
- "NPort x%x deferred flg x%x Data:%p\n",
+ "NPort x%x deferred flg x%x "
+ "Data:x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp);
@@ -5034,16 +5066,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
if (NLP_CHK_FREE_REQ(ndlp)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0280 lpfc_cleanup_node: ndlp:x%p "
+ "0280 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
lpfc_dequeue_node(vport, ndlp);
} else {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0281 lpfc_cleanup_node: ndlp:x%p "
+ "0281 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
lpfc_disable_node(vport, ndlp);
}
@@ -5104,6 +5136,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
lpfc_unreg_rpi(vport, ndlp);
return 0;
@@ -5132,7 +5166,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* allocated by the firmware.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5168,8 +5202,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* for registered rport so need to cleanup rport
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0940 removed node x%p DID x%x "
- " rport not null %p\n",
+ "0940 removed node x%px DID x%x "
+ " rport not null x%px\n",
ndlp, ndlp->nlp_DID, ndlp->rport);
rport = ndlp->rport;
rdata = rport->dd_data;
@@ -5243,15 +5277,15 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (lpfc_matchdid(vport, ndlp, did)) {
- data1 = (((uint32_t) ndlp->nlp_state << 24) |
- ((uint32_t) ndlp->nlp_xri << 16) |
- ((uint32_t) ndlp->nlp_type << 8) |
- ((uint32_t) ndlp->nlp_rpi & 0xff));
+ data1 = (((uint32_t)ndlp->nlp_state << 24) |
+ ((uint32_t)ndlp->nlp_xri << 16) |
+ ((uint32_t)ndlp->nlp_type << 8) |
+ ((uint32_t)ndlp->nlp_usg_map & 0xff));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
- "Data: x%p x%x x%x x%x %p\n",
+ "Data: x%px x%x x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, data1,
+ ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
return ndlp;
}
@@ -5296,7 +5330,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
spin_unlock_irqrestore(shost->host_lock, iflags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"2025 FIND node DID "
- "Data: x%p x%x x%x x%x %p\n",
+ "Data: x%px x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1,
ndlp->active_rrqs_xri_bitmap);
@@ -5336,8 +5370,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (vport->phba->nvmet_support)
return NULL;
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (!ndlp)
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "0014 Could not enable ndlp\n");
return NULL;
+ }
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5960,7 +5997,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -6014,8 +6051,8 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3185 FIND node filter %p DID "
- "ndlp %p did x%x flg x%x st x%x "
+ "3185 FIND node filter %ps DID "
+ "ndlp x%px did x%x flg x%x st x%x "
"xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state,
@@ -6025,7 +6062,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
}
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3186 FIND node filter %p NOT FOUND.\n", filter);
+ "3186 FIND node filter %ps NOT FOUND.\n", filter);
return NULL;
}
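The hunks above move node-lookup logging from %p to x%px for raw addresses and to %ps for the filter function pointer. A minimal sketch of what the three specifiers print, using hypothetical demo names:

	#include <linux/printk.h>

	/* %p hashes the pointer value (safe for logs), %px prints it raw,
	 * and %ps resolves a function pointer to its symbol name.
	 */
	static void demo_log_node(void *obj, int (*filter)(void *))
	{
		pr_info("hashed:%p raw:x%px filter:%ps\n", obj, obj, filter);
	}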
@@ -6065,10 +6102,11 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long flags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, flags);
ndlp = __lpfc_findnode_rpi(vport, rpi);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, flags);
return ndlp;
}
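lpfc_findnode_rpi() above switches to the irqsave locking form. A minimal sketch of the difference between the two variants, with demo_lock standing in for the host lock:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Safe only when the caller is known to run with IRQs enabled:
	 * spin_unlock_irq() unconditionally re-enables interrupts.
	 */
	static void demo_irq_variant(void)
	{
		spin_lock_irq(&demo_lock);
		/* ... critical section ... */
		spin_unlock_irq(&demo_lock);
	}

	/* Safe from any context: the previous IRQ state is saved in 'flags'
	 * and restored exactly as it was, so callers that already hold
	 * interrupts disabled are not broken.
	 */
	static void demo_irqsave_variant(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}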
@@ -6149,7 +6187,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0007 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -6187,8 +6225,9 @@ lpfc_nlp_release(struct kref *kref)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0279 lpfc_nlp_release: ndlp:x%p did %x "
+ "0279 %s: ndlp:x%px did %x "
"usgmap:x%x refcnt:%d rpi:%x\n",
+ __func__,
(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
kref_read(&ndlp->kref), ndlp->nlp_rpi);
@@ -6200,8 +6239,6 @@ lpfc_nlp_release(struct kref *kref)
spin_lock_irqsave(&phba->ndlp_lock, flags);
NLP_CLR_NODE_ACT(ndlp);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -6237,9 +6274,9 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0276 lpfc_nlp_get: ndlp:x%p "
+ "0276 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return NULL;
} else
@@ -6265,9 +6302,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return 1;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node put: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
/* Check the ndlp memory free acknowledge flag to avoid the
@@ -6277,9 +6314,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
if (NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0274 lpfc_nlp_put: ndlp:x%p "
+ "0274 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return 1;
}
@@ -6290,9 +6327,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
if (NLP_CHK_IACT_REQ(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0275 lpfc_nlp_put: ndlp:x%p "
+ "0275 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return 1;
}
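The 0274/0275/0276 messages above guard the node reference-count paths. A short sketch of the underlying kref pattern; demo_node is only a stand-in for the driver's nodelist structure:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_node {
		struct kref kref;
		/* ... payload ... */
	};

	static void demo_node_release(struct kref *kref)
	{
		struct demo_node *node = container_of(kref, struct demo_node, kref);

		kfree(node);
	}

	static struct demo_node *demo_node_alloc(void)
	{
		struct demo_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

		if (node)
			kref_init(&node->kref);	/* refcount starts at 1 */
		return node;
	}

	static struct demo_node *demo_node_get(struct demo_node *node)
	{
		if (node)
			kref_get(&node->kref);
		return node;
	}

	static int demo_node_put(struct demo_node *node)
	{
		/* Returns 1 when the last reference is dropped and the
		 * release callback has run.
		 */
		return node ? kref_put(&node->kref, demo_node_release) : 0;
	}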
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5b439a6dcde1..436cdc8c5ef4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -843,7 +843,7 @@ typedef struct _ADISC { /* Structure is in Big Endian format */
struct lpfc_name portName;
struct lpfc_name nodeName;
uint32_t DID;
-} ADISC;
+} __packed ADISC;
typedef struct _FARP { /* Structure is in Big Endian format */
uint32_t Mflags:8;
@@ -873,7 +873,7 @@ typedef struct _FAN { /* Structure is in Big Endian format */
uint32_t Fdid;
struct lpfc_name FportName;
struct lpfc_name FnodeName;
-} FAN;
+} __packed FAN;
typedef struct _SCR { /* Structure is in Big Endian format */
uint8_t resvd1;
@@ -917,7 +917,7 @@ typedef struct _RNID { /* Structure is in Big Endian format */
union {
RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
} un;
-} RNID;
+} __packed RNID;
typedef struct _RPS { /* Structure is in Big Endian format */
union {
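ADISC, FAN and RNID are on-the-wire ELS payloads, which is why the hunks above mark them __packed. A small sketch, with demo structures only, of why the attribute matters for wire formats:

	#include <linux/types.h>

	/* Without __packed the compiler may insert padding between members,
	 * so the in-memory layout would no longer match the wire payload.
	 */
	struct demo_unpacked {
		u8  code;
		u32 did;	/* usually preceded by 3 bytes of padding */
	};

	struct demo_packed {
		u8  code;
		u32 did;	/* immediately follows 'code' */
	} __packed;

	/* sizeof(struct demo_unpacked) is typically 8,
	 * sizeof(struct demo_packed) is 5.
	 */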
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 77f9a55a3f54..bd533475c86a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2050,6 +2050,23 @@ struct sli4_sge { /* SLI-4 */
uint32_t sge_len;
};
+struct sli4_hybrid_sgl {
+ struct list_head list_node;
+ struct sli4_sge *dma_sgl;
+ dma_addr_t dma_phys_sgl;
+};
+
+struct fcp_cmd_rsp_buf {
+ struct list_head list_node;
+
+ /* for storing cmd/rsp dma alloc'ed virt_addr */
+ struct fcp_cmnd *fcp_cmnd;
+ struct fcp_rsp *fcp_rsp;
+
+ /* for storing this cmd/rsp's dma mapped phys addr from per CPU pool */
+ dma_addr_t fcp_cmd_rsp_dma_handle;
+};
+
struct sli4_sge_diseed { /* SLI-4 */
uint32_t ref_tag;
uint32_t ref_tag_tran;
@@ -3449,6 +3466,9 @@ struct lpfc_sli4_parameters {
#define cfg_xib_SHIFT 4
#define cfg_xib_MASK 0x00000001
#define cfg_xib_WORD word19
+#define cfg_xpsgl_SHIFT 6
+#define cfg_xpsgl_MASK 0x00000001
+#define cfg_xpsgl_WORD word19
#define cfg_eqdr_SHIFT 8
#define cfg_eqdr_MASK 0x00000001
#define cfg_eqdr_WORD word19
@@ -3460,6 +3480,10 @@ struct lpfc_sli4_parameters {
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
+#define cfg_nsler_SHIFT 12
+#define cfg_nsler_MASK 0x00000001
+#define cfg_nsler_WORD word19
+
uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
#define cfg_max_tow_xri_MASK 0x0000ffff
@@ -4314,6 +4338,12 @@ struct wqe_common {
#define wqe_rcvoxid_SHIFT 16
#define wqe_rcvoxid_MASK 0x0000FFFF
#define wqe_rcvoxid_WORD word9
+#define wqe_sof_SHIFT 24
+#define wqe_sof_MASK 0x000000FF
+#define wqe_sof_WORD word9
+#define wqe_eof_SHIFT 16
+#define wqe_eof_MASK 0x000000FF
+#define wqe_eof_WORD word9
uint32_t word10;
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
@@ -4595,6 +4625,7 @@ struct lpfc_nvme_prli {
#define prli_type_code_WORD word1
uint32_t word_rsvd2;
uint32_t word_rsvd3;
+
uint32_t word4;
#define prli_fba_SHIFT 0
#define prli_fba_MASK 0x00000001
@@ -4611,6 +4642,9 @@ struct lpfc_nvme_prli {
#define prli_conf_SHIFT 7
#define prli_conf_MASK 0x00000001
#define prli_conf_WORD word4
+#define prli_nsler_SHIFT 8
+#define prli_nsler_MASK 0x00000001
+#define prli_nsler_WORD word4
uint32_t word5;
#define prli_fb_sz_SHIFT 0
#define prli_fb_sz_MASK 0x0000ffff
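The new cfg_xpsgl, cfg_nsler and prli_nsler definitions follow the driver's SHIFT/MASK/WORD triplet convention. A hedged sketch of how such triplets are typically consumed; the real bf_get()/bf_set() macros live in lpfc_hw4.h and may differ in detail:

	/* demo_* names are illustrative, not the driver's macros */
	#define demo_bf_get(name, ptr) \
		((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

	#define demo_bf_set(name, ptr, value) \
		((ptr)->name##_WORD = (((ptr)->name##_WORD & \
			~(name##_MASK << name##_SHIFT)) | \
			(((value) & name##_MASK) << name##_SHIFT)))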
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 1ac98becb5ba..e91377a4cafe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -39,6 +39,7 @@
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
+#include <linux/crash_dump.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -65,12 +66,6 @@
#include "lpfc_version.h"
#include "lpfc_ids.h"
-char *_dump_buf_data;
-unsigned long _dump_buf_data_order;
-char *_dump_buf_dif;
-unsigned long _dump_buf_dif_order;
-spinlock_t _dump_buf_lock;
-
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
@@ -1081,8 +1076,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- spin_lock(&qp->abts_scsi_buf_list_lock);
- list_splice_init(&qp->lpfc_abts_scsi_buf_list,
+ spin_lock(&qp->abts_io_buf_list_lock);
+ list_splice_init(&qp->lpfc_abts_io_buf_list,
&aborts);
list_for_each_entry_safe(psb, psb_next, &aborts, list) {
@@ -1093,29 +1088,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_lock(&qp->io_buf_list_put_lock);
list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
qp->put_io_bufs += qp->abts_scsi_io_bufs;
+ qp->put_io_bufs += qp->abts_nvme_io_bufs;
qp->abts_scsi_io_bufs = 0;
+ qp->abts_nvme_io_bufs = 0;
spin_unlock(&qp->io_buf_list_put_lock);
- spin_unlock(&qp->abts_scsi_buf_list_lock);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- spin_lock(&qp->abts_nvme_buf_list_lock);
- list_splice_init(&qp->lpfc_abts_nvme_buf_list,
- &nvme_aborts);
- list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
- list) {
- psb->pCmd = NULL;
- psb->status = IOSTAT_SUCCESS;
- cnt++;
- }
- spin_lock(&qp->io_buf_list_put_lock);
- qp->put_io_bufs += qp->abts_nvme_io_bufs;
- qp->abts_nvme_io_bufs = 0;
- list_splice_init(&nvme_aborts,
- &qp->lpfc_io_buf_list_put);
- spin_unlock(&qp->io_buf_list_put_lock);
- spin_unlock(&qp->abts_nvme_buf_list_lock);
-
- }
+ spin_unlock(&qp->abts_io_buf_list_lock);
}
spin_unlock_irq(&phba->hbalock);
@@ -1261,6 +1238,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
unsigned char *eqcnt = NULL;
uint32_t usdelay;
int i;
+ bool update = false;
if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
return;
@@ -1274,20 +1252,29 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
if (!eqcnt)
goto requeue;
- /* Loop thru all IRQ vectors */
- for (i = 0; i < phba->cfg_irq_chann; i++) {
- /* Get the EQ corresponding to the IRQ vector */
- eq = phba->sli4_hba.hba_eq_hdl[i].eq;
- if (eq && eqcnt[eq->last_cpu] < 2)
- eqcnt[eq->last_cpu]++;
- continue;
- }
+ if (phba->cfg_irq_chann > 1) {
+ /* Loop thru all IRQ vectors */
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+ if (!eq)
+ continue;
+ if (eq->q_mode) {
+ update = true;
+ break;
+ }
+ if (eqcnt[eq->last_cpu] < 2)
+ eqcnt[eq->last_cpu]++;
+ }
+ } else
+ update = true;
for_each_present_cpu(i) {
- if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2)
- continue;
-
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+ if (!update && eqcnt[i] < 2) {
+ eqi->icnt = 0;
+ continue;
+ }
usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
LPFC_EQ_DELAY_STEP;
@@ -1535,6 +1522,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ lpfc_sli_flush_io_rings(phba);
lpfc_offline(phba);
lpfc_hba_down_post(phba);
lpfc_unblock_mgmt_io(phba);
@@ -1796,6 +1784,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
"2887 Reset Needed: Attempting Port "
"Recovery...\n");
lpfc_offline_prep(phba, mbx_action);
+ lpfc_sli_flush_io_rings(phba);
lpfc_offline(phba);
/* release interrupt for possible resource change */
lpfc_sli4_disable_intr(phba);
@@ -1915,7 +1904,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"7624 Firmware not ready: Failing UE recovery,"
" waited %dSec", i);
- lpfc_sli4_offline_eratt(phba);
+ phba->link_state = LPFC_HBA_ERROR;
break;
case LPFC_SLI_INTF_IF_TYPE_2:
@@ -1989,9 +1978,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
/* fall through for not able to recover */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3152 Unrecoverable error, bring the port "
- "offline\n");
- lpfc_sli4_offline_eratt(phba);
+ "3152 Unrecoverable error\n");
+ phba->link_state = LPFC_HBA_ERROR;
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -2863,7 +2851,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
LOG_NODE,
- "0282 did:x%x ndlp:x%p "
+ "0282 did:x%x ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
ndlp->nlp_DID, (void *)ndlp,
ndlp->nlp_usg_map,
@@ -3067,7 +3055,7 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0009 rpi:%x DID:%x "
- "flg:%x map:%x %p\n", ndlp->nlp_rpi,
+ "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_usg_map, ndlp);
}
@@ -3252,12 +3240,8 @@ static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
lpfc_destroy_expedite_pool(phba);
- if (!(phba->pport->load_flag & FC_UNLOADING)) {
- lpfc_sli_flush_fcp_rings(phba);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
- }
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_sli_flush_io_rings(phba);
hwq_count = phba->cfg_hdw_queue;
@@ -3491,7 +3475,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_printf_vlog(ndlp->vport,
KERN_INFO, LOG_NODE,
"0011 lpfc_offline: "
- "ndlp:x%p did %x "
+ "ndlp:x%px did %x "
"usgmap:x%x rpi:%x\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_usg_map,
@@ -3636,6 +3620,9 @@ lpfc_io_free(struct lpfc_hba *phba)
qp->put_io_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
kfree(lpfc_ncmd);
qp->total_io_bufs--;
}
@@ -3649,6 +3636,9 @@ lpfc_io_free(struct lpfc_hba *phba)
qp->get_io_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
kfree(lpfc_ncmd);
qp->total_io_bufs--;
}
@@ -4097,18 +4087,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
LIST_HEAD(post_nblist);
LIST_HEAD(nvme_nblist);
- /* Sanity check to ensure our sizing is right for both SCSI and NVME */
- if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "6426 Common buffer size %zd exceeds %d\n",
- sizeof(struct lpfc_io_buf),
- LPFC_COMMON_IO_BUF_SZ);
- return 0;
- }
-
phba->sli4_hba.io_xri_cnt = 0;
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
- lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
+ lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
if (!lpfc_ncmd)
break;
/*
@@ -4124,22 +4105,30 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
break;
}
- /*
- * 4K Page alignment is CRITICAL to BlockGuard, double check
- * to be sure.
- */
- if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- (((unsigned long)(lpfc_ncmd->data) &
- (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3369 Memory alignment err: addr=%lx\n",
- (unsigned long)lpfc_ncmd->data);
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- lpfc_ncmd->data, lpfc_ncmd->dma_handle);
- kfree(lpfc_ncmd);
- break;
+ if (phba->cfg_xpsgl && !phba->nvmet_support) {
+ INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
+ } else {
+ /*
+ * 4K Page alignment is CRITICAL to BlockGuard, double
+ * check to be sure.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ (((unsigned long)(lpfc_ncmd->data) &
+ (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "3369 Memory alignment err: "
+ "addr=%lx\n",
+ (unsigned long)lpfc_ncmd->data);
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ break;
+ }
}
+ INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
+
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
@@ -4318,7 +4307,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
- shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
+
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
+ else
+ shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
@@ -6336,6 +6329,24 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
return -ENOMEM;
+ phba->lpfc_sg_dma_buf_pool =
+ dma_pool_create("lpfc_sg_dma_buf_pool",
+ &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
+ BPL_ALIGN_SZ, 0);
+
+ if (!phba->lpfc_sg_dma_buf_pool)
+ goto fail_free_mem;
+
+ phba->lpfc_cmd_rsp_buf_pool =
+ dma_pool_create("lpfc_cmd_rsp_buf_pool",
+ &phba->pcidev->dev,
+ sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp),
+ BPL_ALIGN_SZ, 0);
+
+ if (!phba->lpfc_cmd_rsp_buf_pool)
+ goto fail_free_dma_buf_pool;
+
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -6354,6 +6365,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
}
return 0;
+
+fail_free_dma_buf_pool:
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
+fail_free_mem:
+ lpfc_mem_free(phba);
+ return -ENOMEM;
}
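The SLI-3 resource setup above now creates the sg and cmd/rsp DMA pools and unwinds them on failure. A minimal sketch of that create-then-unwind pattern, assuming dev, size and align are supplied by the caller:

	#include <linux/device.h>
	#include <linux/dmapool.h>
	#include <linux/errno.h>

	static struct dma_pool *a_pool, *b_pool;

	static int demo_setup(struct device *dev, size_t size, size_t align)
	{
		a_pool = dma_pool_create("demo_a", dev, size, align, 0);
		if (!a_pool)
			return -ENOMEM;

		b_pool = dma_pool_create("demo_b", dev, size, align, 0);
		if (!b_pool)
			goto fail_free_a;

		return 0;

	fail_free_a:
		dma_pool_destroy(a_pool);
		a_pool = NULL;
		return -ENOMEM;
	}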
/**
@@ -6414,6 +6432,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
+ /* Allocate all driver workqueues here */
+
+ /* The lpfc_wq workqueue for deferred irq use */
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+
/*
* Initialize timers used by driver
*/
@@ -6448,102 +6471,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* The WQ create will allocate the ring.
*/
- /*
- * 1 for cmd, 1 for rsp, NVME adds an extra one
- * for boundary conditions in its max_sgl_segment template.
- */
- extra = 2;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- extra++;
-
- /*
- * It doesn't matter what family our adapter is in, we are
- * limited to 2 Pages, 512 SGEs, for our SGL.
- * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
- */
- max_buf_size = (2 * SLI4_PAGE_SIZE);
-
- /*
- * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
- * used to create the sg_dma_buf_pool must be calculated.
- */
- if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
- /*
- * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
- * the FCP rsp, and a SGE. Sice we have no control
- * over how many protection segments the SCSI Layer
- * will hand us (ie: there could be one for every block
- * in the IO), just allocate enough SGEs to accomidate
- * our max amount and we need to limit lpfc_sg_seg_cnt
- * to minimize the risk of running out.
- */
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) + max_buf_size;
-
- /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
- phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
-
- /*
- * If supporting DIF, reduce the seg count for scsi to
- * allow room for the DIF sges.
- */
- if (phba->cfg_enable_bg &&
- phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
- phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
- else
- phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
-
- } else {
- /*
- * The scsi_buf for a regular I/O holds the FCP cmnd,
- * the FCP rsp, a SGE for each, and a SGE for up to
- * cfg_sg_seg_cnt data segments.
- */
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + extra) *
- sizeof(struct sli4_sge));
-
- /* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
- phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
-
- /*
- * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
- * need to post 1 page for the SGL.
- */
- }
-
- /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
- "6300 Reducing NVME sg segment "
- "cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
-
- /* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
-
- if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
- phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
- else
- phba->cfg_sg_dma_buf_size =
- SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9087 sg_seg_cnt:%d dmabuf_size:%d "
- "total:%d scsi:%d nvme:%d\n",
- phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
- phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
- phba->cfg_nvme_seg_cnt);
-
/* Initialize buffer queue management fields */
INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
@@ -6552,11 +6479,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/*
* Initialize the SLI Layer to run with lpfc SLI4 HBAs.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- /* Initialize the Abort scsi buffer list used by driver */
- spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
- }
+ /* Initialize the Abort buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Initialize the Abort nvme buffer list used by driver */
@@ -6764,6 +6689,131 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
+ /*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
+ * It doesn't matter what family our adapter is in, we are
+ * limited to 2 Pages, 512 SGEs, for our SGL.
+ * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+ */
+ max_buf_size = (2 * SLI4_PAGE_SIZE);
+
+ /*
+ * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be calculated.
+ */
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
+ /* Both cfg_enable_bg and cfg_external_dif code paths */
+
+ /*
+ * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
+			 * the FCP rsp, and a SGE. Since we have no control
+			 * over how many protection segments the SCSI Layer
+			 * will hand us (ie: there could be one for every block
+			 * in the IO), just allocate enough SGEs to accommodate

+ * our max amount and we need to limit lpfc_sg_seg_cnt
+ * to minimize the risk of running out.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) + max_buf_size;
+
+ /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+ phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+ /*
+ * If supporting DIF, reduce the seg count for scsi to
+ * allow room for the DIF sges.
+ */
+ if (phba->cfg_enable_bg &&
+ phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+ phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+ else
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
+ } else {
+ /*
+ * The scsi_buf for a regular I/O holds the FCP cmnd,
+ * the FCP rsp, a SGE for each, and a SGE for up to
+ * cfg_sg_seg_cnt data segments.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + extra) *
+ sizeof(struct sli4_sge));
+
+ /* Total SGEs for scsi_sg_list */
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
+ /*
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
+ * need to post 1 page for the SGL.
+ */
+ }
+
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
+ else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
+ phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+ else
+ phba->cfg_sg_dma_buf_size =
+ SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+ phba->border_sge_num = phba->cfg_sg_dma_buf_size /
+ sizeof(struct sli4_sge);
+
+ /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6300 Reducing NVME sg segment "
+ "cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+
+ /* Initialize the host templates with the updated values. */
+ lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9087 sg_seg_cnt:%d dmabuf_size:%d "
+ "total:%d scsi:%d nvme:%d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+ phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
+
+ if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+ i = phba->cfg_sg_dma_buf_size;
+ else
+ i = SLI4_PAGE_SIZE;
+
+ phba->lpfc_sg_dma_buf_pool =
+ dma_pool_create("lpfc_sg_dma_buf_pool",
+ &phba->pcidev->dev,
+ phba->cfg_sg_dma_buf_size,
+ i, 0);
+ if (!phba->lpfc_sg_dma_buf_pool)
+ goto out_free_bsmbx;
+
+ phba->lpfc_cmd_rsp_buf_pool =
+ dma_pool_create("lpfc_cmd_rsp_buf_pool",
+ &phba->pcidev->dev,
+ sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp),
+ i, 0);
+ if (!phba->lpfc_cmd_rsp_buf_pool)
+ goto out_free_sg_dma_buf;
+
mempool_free(mboxq, phba->mbox_mem_pool);
/* Verify OAS is supported */
@@ -6775,12 +6825,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
- goto out_free_bsmbx;
+ goto out_free_cmd_rsp_buf;
/* Create driver internal CQE event pool */
rc = lpfc_sli4_cq_event_pool_create(phba);
if (rc)
- goto out_free_bsmbx;
+ goto out_free_cmd_rsp_buf;
/* Initialize sgl lists per host */
lpfc_init_sgl_list(phba);
@@ -6871,6 +6921,12 @@ out_free_active_sgl:
lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
lpfc_sli4_cq_event_pool_destroy(phba);
+out_free_cmd_rsp_buf:
+ dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+ phba->lpfc_cmd_rsp_buf_pool = NULL;
+out_free_sg_dma_buf:
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
@@ -6997,12 +7053,6 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
return error;
}
- /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
- if (phba->sli_rev == LPFC_SLI_REV4)
- phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
- else
- phba->wq = NULL;
-
return 0;
}
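The lpfc_wq allocation moves out of phase2 and into the SLI-4 resource setup, keeping WQ_MEM_RECLAIM for the deferred-IRQ path. A small sketch of that workqueue pattern with hypothetical demo_* names:

	#include <linux/workqueue.h>
	#include <linux/errno.h>

	static struct workqueue_struct *demo_wq;

	static void demo_fn(struct work_struct *work)
	{
		/* deferred follow-up work runs here in process context */
	}
	static DECLARE_WORK(demo_work, demo_fn);

	static int demo_init(void)
	{
		/* WQ_MEM_RECLAIM guarantees forward progress under memory
		 * pressure, which matters for a storage driver's I/O path.
		 */
		demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
		if (!demo_wq)
			return -ENOMEM;
		queue_work(demo_wq, &demo_work);
		return 0;
	}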
@@ -7563,7 +7613,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
uint32_t old_mask;
uint32_t old_guard;
- int pagecnt = 10;
if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
@@ -7600,56 +7649,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
"layer, Bad protection parameters: %d %d\n",
old_mask, old_guard);
}
-
- if (!_dump_buf_data) {
- while (pagecnt) {
- spin_lock_init(&_dump_buf_lock);
- _dump_buf_data =
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
- if (_dump_buf_data) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9043 BLKGRD: allocated %d pages for "
- "_dump_buf_data at 0x%p\n",
- (1 << pagecnt), _dump_buf_data);
- _dump_buf_data_order = pagecnt;
- memset(_dump_buf_data, 0,
- ((1 << PAGE_SHIFT) << pagecnt));
- break;
- } else
- --pagecnt;
- }
- if (!_dump_buf_data_order)
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9044 BLKGRD: ERROR unable to allocate "
- "memory for hexdump\n");
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
- "\n", _dump_buf_data);
- if (!_dump_buf_dif) {
- while (pagecnt) {
- _dump_buf_dif =
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
- if (_dump_buf_dif) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9046 BLKGRD: allocated %d pages for "
- "_dump_buf_dif at 0x%p\n",
- (1 << pagecnt), _dump_buf_dif);
- _dump_buf_dif_order = pagecnt;
- memset(_dump_buf_dif, 0,
- ((1 << PAGE_SHIFT) << pagecnt));
- break;
- } else
- --pagecnt;
- }
- if (!_dump_buf_dif_order)
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9047 BLKGRD: ERROR unable to allocate "
- "memory for hexdump\n");
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
- _dump_buf_dif);
}
/**
@@ -8309,6 +8308,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+ /* Reduce resource usage in kdump environment */
+ if (is_kdump_kernel() &&
+ phba->sli4_hba.max_cfg_param.max_xri > 512)
+ phba->sli4_hba.max_cfg_param.max_xri = 512;
phba->sli4_hba.max_cfg_param.xri_base =
bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
phba->sli4_hba.max_cfg_param.max_vpi =
@@ -8382,11 +8385,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
*/
qmin -= 4;
- /* If NVME is configured, double the number of CQ/WQs needed */
- if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
- !phba->nvmet_support)
- qmin /= 2;
-
/* Check to see if there is enough for NVME */
if ((phba->cfg_irq_chann > qmin) ||
(phba->cfg_hdw_queue > qmin)) {
@@ -8643,51 +8641,14 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
}
static int
-lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
-{
- struct lpfc_queue *qdesc;
- int cpu;
-
- cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
- phba->sli4_hba.cq_esize,
- LPFC_CQE_EXP_COUNT, cpu);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0508 Failed allocate fast-path NVME CQ (%d)\n",
- wqidx);
- return 1;
- }
- qdesc->qe_valid = 1;
- qdesc->hdwq = wqidx;
- qdesc->chann = cpu;
- phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
-
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
- LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
- cpu);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0509 Failed allocate fast-path NVME WQ (%d)\n",
- wqidx);
- return 1;
- }
- qdesc->hdwq = wqidx;
- qdesc->chann = wqidx;
- phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
- list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
- return 0;
-}
-
-static int
-lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
+lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
+ u32 wqesize;
int cpu;
- cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
- /* Create Fast Path FCP CQs */
+ cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
+ /* Create Fast Path IO CQs */
if (phba->enab_exp_wqcq_pages)
/* Increase the CQ size when WQEs contain an embedded cdb */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
@@ -8700,15 +8661,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
+ "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
return 1;
}
qdesc->qe_valid = 1;
- qdesc->hdwq = wqidx;
+ qdesc->hdwq = idx;
qdesc->chann = cpu;
- phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
+ phba->sli4_hba.hdwq[idx].io_cq = qdesc;
- /* Create Fast Path FCP WQs */
+ /* Create Fast Path IO WQs */
if (phba->enab_exp_wqcq_pages) {
/* Increase the WQ size when WQEs contain an embedded cdb */
wqesize = (phba->fcp_embed_io) ?
@@ -8723,13 +8684,13 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0503 Failed allocate fast-path FCP WQ (%d)\n",
- wqidx);
+ "0503 Failed allocate fast-path IO WQ (%d)\n",
+ idx);
return 1;
}
- qdesc->hdwq = wqidx;
- qdesc->chann = wqidx;
- phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
+ qdesc->hdwq = idx;
+ qdesc->chann = cpu;
+ phba->sli4_hba.hdwq[idx].io_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
return 0;
}
@@ -8793,12 +8754,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qp->get_io_bufs = 0;
qp->put_io_bufs = 0;
qp->total_io_bufs = 0;
- spin_lock_init(&qp->abts_scsi_buf_list_lock);
- INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list);
+ spin_lock_init(&qp->abts_io_buf_list_lock);
+ INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs = 0;
- spin_lock_init(&qp->abts_nvme_buf_list_lock);
- INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
qp->abts_nvme_io_bufs = 0;
+ INIT_LIST_HEAD(&qp->sgl_list);
+ INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
+ spin_lock_init(&qp->hdwq_lock);
}
}
@@ -8864,7 +8826,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
qdesc->qe_valid = 1;
qdesc->hdwq = cpup->hdwq;
- qdesc->chann = cpu; /* First CPU this EQ is affinitised to */
+ qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
qdesc->last_cpu = qdesc->chann;
/* Save the allocated EQ in the Hardware Queue */
@@ -8895,41 +8857,31 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
}
- /* Allocate SCSI SLI4 CQ/WQs */
+ /* Allocate IO Path SLI4 CQ/WQs */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- if (lpfc_alloc_fcp_wq_cq(phba, idx))
+ if (lpfc_alloc_io_wq_cq(phba, idx))
goto out_error;
}
- /* Allocate NVME SLI4 CQ/WQs */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- if (lpfc_alloc_nvme_wq_cq(phba, idx))
- goto out_error;
- }
-
- if (phba->nvmet_support) {
- for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
- cpu = lpfc_find_cpu_handle(phba, idx,
- LPFC_FIND_BY_HDWQ);
- qdesc = lpfc_sli4_queue_alloc(
- phba,
+ if (phba->nvmet_support) {
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ cpu = lpfc_find_cpu_handle(phba, idx,
+ LPFC_FIND_BY_HDWQ);
+ qdesc = lpfc_sli4_queue_alloc(phba,
LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount,
cpu);
- if (!qdesc) {
- lpfc_printf_log(
- phba, KERN_ERR, LOG_INIT,
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
"CQ Set (%d)\n", idx);
- goto out_error;
- }
- qdesc->qe_valid = 1;
- qdesc->hdwq = idx;
- qdesc->chann = cpu;
- phba->sli4_hba.nvmet_cqset[idx] = qdesc;
+ goto out_error;
}
+ qdesc->qe_valid = 1;
+ qdesc->hdwq = idx;
+ qdesc->chann = cpu;
+ phba->sli4_hba.nvmet_cqset[idx] = qdesc;
}
}
@@ -8960,7 +8912,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
qdesc->qe_valid = 1;
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.els_cq = qdesc;
@@ -8978,7 +8930,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"0505 Failed allocate slow-path MQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.mbx_wq = qdesc;
/*
@@ -8994,7 +8946,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"0504 Failed allocate slow-path ELS WQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.els_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
@@ -9008,7 +8960,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"6079 Failed allocate NVME LS CQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
qdesc->qe_valid = 1;
phba->sli4_hba.nvmels_cq = qdesc;
@@ -9021,7 +8973,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"6080 Failed allocate NVME LS WQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.nvmels_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
}
@@ -9164,15 +9116,13 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
/* Loop thru all Hardware Queues */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
/* Free the CQ/WQ corresponding to the Hardware Queue */
- lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
- lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
- lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
- lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
- hdwq[idx].hba_eq = NULL;
- hdwq[idx].fcp_cq = NULL;
- hdwq[idx].nvme_cq = NULL;
- hdwq[idx].fcp_wq = NULL;
- hdwq[idx].nvme_wq = NULL;
+ lpfc_sli4_queue_free(hdwq[idx].io_cq);
+ lpfc_sli4_queue_free(hdwq[idx].io_wq);
+ hdwq[idx].io_cq = NULL;
+ hdwq[idx].io_wq = NULL;
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
+ lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
}
/* Loop thru all IRQ vectors */
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
@@ -9372,8 +9322,7 @@ lpfc_setup_cq_lookup(struct lpfc_hba *phba)
list_for_each_entry(childq, &eq->child_list, list) {
if (childq->queue_id > phba->sli4_hba.cq_max)
continue;
- if ((childq->subtype == LPFC_FCP) ||
- (childq->subtype == LPFC_NVME))
+ if (childq->subtype == LPFC_IO)
phba->sli4_hba.cq_lookup[childq->queue_id] =
childq;
}
@@ -9499,31 +9448,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
/* Loop thru all Hardware Queues */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- cpu = lpfc_find_cpu_handle(phba, qidx,
- LPFC_FIND_BY_HDWQ);
- cpup = &phba->sli4_hba.cpu_map[cpu];
-
- /* Create the CQ/WQ corresponding to the
- * Hardware Queue
- */
- rc = lpfc_create_wq_cq(phba,
- phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
- qp[qidx].nvme_cq,
- qp[qidx].nvme_wq,
- &phba->sli4_hba.hdwq[qidx].nvme_cq_map,
- qidx, LPFC_NVME);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6123 Failed to setup fastpath "
- "NVME WQ/CQ (%d), rc = 0x%x\n",
- qidx, (uint32_t)rc);
- goto out_destroy;
- }
- }
- }
-
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -9531,14 +9455,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/* Create the CQ/WQ corresponding to the Hardware Queue */
rc = lpfc_create_wq_cq(phba,
phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
- qp[qidx].fcp_cq,
- qp[qidx].fcp_wq,
- &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
- qidx, LPFC_FCP);
+ qp[qidx].io_cq,
+ qp[qidx].io_wq,
+ &phba->sli4_hba.hdwq[qidx].io_cq_map,
+ qidx,
+ LPFC_IO);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0535 Failed to setup fastpath "
- "FCP WQ/CQ (%d), rc = 0x%x\n",
+ "IO WQ/CQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
goto out_destroy;
}
@@ -9838,10 +9763,8 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
/* Destroy the CQ/WQ corresponding to Hardware Queue */
qp = &phba->sli4_hba.hdwq[qidx];
- lpfc_wq_destroy(phba, qp->fcp_wq);
- lpfc_wq_destroy(phba, qp->nvme_wq);
- lpfc_cq_destroy(phba, qp->fcp_cq);
- lpfc_cq_destroy(phba, qp->nvme_cq);
+ lpfc_wq_destroy(phba, qp->io_wq);
+ lpfc_cq_destroy(phba, qp->io_cq);
}
/* Loop thru all IRQ vectors */
for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
@@ -10711,7 +10634,7 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
- int i, cpu, idx, new_cpu, start_cpu, first_cpu;
+ int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
int max_phys_id, min_phys_id;
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
@@ -10753,8 +10676,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3328 CPU physid %d coreid %d\n",
- cpup->phys_id, cpup->core_id);
+ "3328 CPU %d physid %d coreid %d flag x%x\n",
+ cpu, cpup->phys_id, cpup->core_id, cpup->flag);
if (cpup->phys_id > max_phys_id)
max_phys_id = cpup->phys_id;
@@ -10812,17 +10735,17 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
cpup->eq = idx;
cpup->irq = pci_irq_vector(phba->pcidev, idx);
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3336 Set Affinity: CPU %d "
- "irq %d eq %d\n",
- cpu, cpup->irq, cpup->eq);
-
/* If this is the first CPU thats assigned to this
* vector, set LPFC_CPU_FIRST_IRQ.
*/
if (!i)
cpup->flag |= LPFC_CPU_FIRST_IRQ;
i++;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3336 Set Affinity: CPU %d "
+ "irq %d eq %d flag x%x\n",
+ cpu, cpup->irq, cpup->eq, cpup->flag);
}
}
@@ -10936,69 +10859,103 @@ found_any:
}
}
+ /* Assign hdwq indices that are unique across all cpus in the map
+ * that are also FIRST_CPUs.
+ */
+ idx = 0;
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Only FIRST IRQs get a hdwq index assignment. */
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ continue;
+
+ /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
+ cpup->hdwq = idx;
+ idx++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3333 Set Affinity: CPU %d (phys %d core %d): "
+ "hdwq %d eq %d irq %d flg x%x\n",
+ cpu, cpup->phys_id, cpup->core_id,
+ cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ }
/* Finally we need to associate a hdwq with each cpu_map entry
* This will be 1 to 1 - hdwq to cpu, unless there are less
* hardware queues then CPUs. For that case we will just round-robin
* the available hardware queues as they get assigned to CPUs.
+ * The next_idx is the idx from the FIRST_CPU loop above to account
+ * for irq_chann < hdwq. The idx is used for round-robin assignments
+ * and needs to start at 0.
*/
- idx = 0;
+ next_idx = idx;
start_cpu = 0;
+ idx = 0;
for_each_present_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
- if (idx >= phba->cfg_hdw_queue) {
- /* We need to reuse a Hardware Queue for another CPU,
- * so be smart about it and pick one that has its
- * IRQ/EQ mapped to the same phys_id (CPU package).
- * and core_id.
- */
- new_cpu = start_cpu;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
- if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
- (new_cpup->phys_id == cpup->phys_id) &&
- (new_cpup->core_id == cpup->core_id))
- goto found_hdwq;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
- new_cpu = first_cpu;
- }
- /* If we can't match both phys_id and core_id,
- * settle for just a phys_id match.
- */
- new_cpu = start_cpu;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
- if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
- (new_cpup->phys_id == cpup->phys_id))
- goto found_hdwq;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
- new_cpu = first_cpu;
+ /* FIRST cpus are already mapped. */
+ if (cpup->flag & LPFC_CPU_FIRST_IRQ)
+ continue;
+
+ /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
+ * of the unassigned cpus to the next idx so that all
+ * hdw queues are fully utilized.
+ */
+ if (next_idx < phba->cfg_hdw_queue) {
+ cpup->hdwq = next_idx;
+ next_idx++;
+ continue;
+ }
+
+ /* Not a First CPU and all hdw_queues are used. Reuse a
+ * Hardware Queue for another CPU, so be smart about it
+ * and pick one that has its IRQ/EQ mapped to the same phys_id
+ * (CPU package) and core_id.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+ new_cpup->phys_id == cpup->phys_id &&
+ new_cpup->core_id == cpup->core_id) {
+ goto found_hdwq;
}
+ new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
- /* Otherwise just round robin on cfg_hdw_queue */
- cpup->hdwq = idx % phba->cfg_hdw_queue;
- goto logit;
-found_hdwq:
- /* We found an available entry, copy the IRQ info */
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
- start_cpu = first_cpu;
- cpup->hdwq = new_cpup->hdwq;
- } else {
- /* 1 to 1, CPU to hdwq */
- cpup->hdwq = idx;
+ /* If we can't match both phys_id and core_id,
+ * settle for just a phys_id match.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+ new_cpup->phys_id == cpup->phys_id)
+ goto found_hdwq;
+
+ new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
}
-logit:
+
+ /* Otherwise just round robin on cfg_hdw_queue */
+ cpup->hdwq = idx % phba->cfg_hdw_queue;
+ idx++;
+ goto logit;
+ found_hdwq:
+ /* We found an available entry, copy the IRQ info */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+ cpup->hdwq = new_cpup->hdwq;
+ logit:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
"hdwq %d eq %d irq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
- idx++;
}
/* The cpu_map array will be used later during initialization
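The affinity rework above first gives each FIRST_IRQ CPU a unique hdwq, then fills the remaining CPUs, and finally falls back to a plain round-robin over cfg_hdw_queue. A simplified sketch of that last fallback, with illustrative names only:

	/* Once every hardware queue has an owner, remaining CPUs simply
	 * wrap around on the queue count.
	 */
	static void demo_map_cpus(unsigned int *cpu_to_hdwq,
				  unsigned int num_cpus,
				  unsigned int num_hdwq)
	{
		unsigned int cpu, idx = 0;

		for (cpu = 0; cpu < num_cpus; cpu++) {
			cpu_to_hdwq[cpu] = idx % num_hdwq;
			idx++;
		}
	}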
@@ -11089,10 +11046,10 @@ vec_fail_out:
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI interrupt mode to device with
- * SLI-4 interface spec. The kernel function pci_enable_msi() is called
- * to enable the MSI vector. The device driver is responsible for calling
- * the request_irq() to register MSI vector with a interrupt the handler,
- * which is done in this function.
+ * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
+ * called to enable the MSI vector. The device driver is responsible for
+ * calling request_irq() to register the MSI vector with an interrupt
+ * handler, which is done in this function.
*
* Return codes
* 0 - successful
@@ -11103,20 +11060,21 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
int rc, index;
- rc = pci_enable_msi(phba->pcidev);
- if (!rc)
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
+ if (rc > 0)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0487 PCI enable MSI mode success.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0488 PCI enable MSI mode failed (%d)\n", rc);
- return rc;
+ return rc ? rc : -1;
}
rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
0, LPFC_DRIVER_NAME, phba);
if (rc) {
- pci_disable_msi(phba->pcidev);
+ pci_free_irq_vectors(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0490 MSI request_irq failed (%d)\n", rc);
return rc;
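lpfc_sli4_enable_msi() above now uses pci_alloc_irq_vectors() instead of the legacy pci_enable_msi(). A minimal sketch of that sequence; demo_handler and drvdata are placeholders:

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static int demo_enable_msi(struct pci_dev *pdev,
				   irq_handler_t demo_handler, void *drvdata)
	{
		int rc;

		/* Returns the number of vectors allocated (>= 1) or a
		 * negative errno.
		 */
		rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (rc < 0)
			return rc;

		rc = request_irq(pci_irq_vector(pdev, 0), demo_handler, 0,
				 "demo_msi", drvdata);
		if (rc)
			pci_free_irq_vectors(pdev);
		return rc;
	}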
@@ -11282,11 +11240,10 @@ static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *qp;
- int idx, ccnt, fcnt;
+ int idx, ccnt;
int wait_time = 0;
int io_xri_cmpl = 1;
int nvmet_xri_cmpl = 1;
- int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
/* Driver just aborted IOs during the hba_unset process. Pause
@@ -11300,32 +11257,21 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
lpfc_nvme_wait_for_io_drain(phba);
ccnt = 0;
- fcnt = 0;
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- fcp_xri_cmpl = list_empty(
- &qp->lpfc_abts_scsi_buf_list);
- if (!fcp_xri_cmpl) /* if list is NOT empty */
- fcnt++;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- io_xri_cmpl = list_empty(
- &qp->lpfc_abts_nvme_buf_list);
- if (!io_xri_cmpl) /* if list is NOT empty */
- ccnt++;
- }
+ io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
+ if (!io_xri_cmpl) /* if list is NOT empty */
+ ccnt++;
}
if (ccnt)
io_xri_cmpl = 0;
- if (fcnt)
- fcp_xri_cmpl = 0;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
nvmet_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
}
- while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
- !nvmet_xri_cmpl) {
+ while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
if (!nvmet_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11334,12 +11280,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
wait_time/1000);
if (!io_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6100 NVME XRI exchange busy "
- "wait time: %d seconds.\n",
- wait_time/1000);
- if (!fcp_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2877 FCP XRI exchange busy "
+ "6100 IO XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
if (!els_xri_cmpl)
@@ -11355,24 +11296,15 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
}
ccnt = 0;
- fcnt = 0;
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- fcp_xri_cmpl = list_empty(
- &qp->lpfc_abts_scsi_buf_list);
- if (!fcp_xri_cmpl) /* if list is NOT empty */
- fcnt++;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- io_xri_cmpl = list_empty(
- &qp->lpfc_abts_nvme_buf_list);
- if (!io_xri_cmpl) /* if list is NOT empty */
- ccnt++;
- }
+ io_xri_cmpl = list_empty(
+ &qp->lpfc_abts_io_buf_list);
+ if (!io_xri_cmpl) /* if list is NOT empty */
+ ccnt++;
}
if (ccnt)
io_xri_cmpl = 0;
- if (fcnt)
- fcp_xri_cmpl = 0;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
nvmet_xri_cmpl = list_empty(
@@ -11616,6 +11548,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+ /* Check for Extended Pre-Registered SGL support */
+ phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
+
/* Check for firmware nvme support */
rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
bf_get(cfg_xib, mbx_sli4_parameters));
@@ -11646,6 +11581,7 @@ fcponly:
phba->nvme_support = 0;
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvme_seg_cnt = 0;
/* If no FC4 type support, move to just SCSI support */
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
@@ -11654,6 +11590,15 @@ fcponly:
}
}
+ /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+ * accommodate 512K and 1M IOs in a single nvme buf and supply
+ * enough NVME LS iocb buffers for larger connectivity counts.
+ */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ phba->cfg_iocb_cnt = 5;
+ }
+
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
@@ -11718,6 +11663,14 @@ fcponly:
else
phba->mds_diags_support = 0;
+ /*
+ * Check if the SLI port supports NSLER
+ */
+ if (bf_get(cfg_nsler, mbx_sli4_parameters))
+ phba->nsler = 1;
+ else
+ phba->nsler = 0;
+
return 0;
}
@@ -12146,7 +12099,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
lpfc_scsi_dev_block(phba);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
- lpfc_sli_flush_fcp_rings(phba);
+ lpfc_sli_flush_io_rings(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -12176,7 +12129,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
lpfc_stop_hba_timers(phba);
/* Clean up all driver's outstanding SCSI I/Os */
- lpfc_sli_flush_fcp_rings(phba);
+ lpfc_sli_flush_io_rings(phba);
}
/**
@@ -12948,12 +12901,8 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
- /* Flush all driver's outstanding SCSI I/Os as we are to reset */
- lpfc_sli_flush_fcp_rings(phba);
-
- /* Flush the outstanding NVME IOs if fc4 type enabled. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
+ /* Flush all driver's outstanding I/Os as we are to reset */
+ lpfc_sli_flush_io_rings(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -12984,12 +12933,8 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
/* stop all timers */
lpfc_stop_hba_timers(phba);
- /* Clean up all driver's outstanding SCSI I/Os */
- lpfc_sli_flush_fcp_rings(phba);
-
- /* Flush the outstanding NVME IOs if fc4 type enabled. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
+ /* Clean up all driver's outstanding I/Os */
+ lpfc_sli_flush_io_rings(phba);
}
/**
@@ -13530,19 +13475,6 @@ lpfc_exit(void)
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
- if (_dump_buf_data) {
- printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
- "_dump_buf_data at 0x%p\n",
- (1L << _dump_buf_data_order), _dump_buf_data);
- free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
- }
-
- if (_dump_buf_dif) {
- printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
- "_dump_buf_dif at 0x%p\n",
- (1L << _dump_buf_dif_order), _dump_buf_dif);
- free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
- }
idr_destroy(&lpfc_hba_index);
}
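The removed _dump_buf_data/_dump_buf_dif hexdump buffers were order-based page allocations. A short sketch of that allocator's semantics with demo names; an order-n call returns 2^n contiguous pages and must be freed with the same order:

	#include <linux/gfp.h>
	#include <linux/errno.h>

	static char *demo_buf;
	static unsigned long demo_order = 2;	/* 4 contiguous pages */

	static int demo_alloc(void)
	{
		demo_buf = (char *)__get_free_pages(GFP_KERNEL, demo_order);
		return demo_buf ? 0 : -ENOMEM;
	}

	static void demo_free(void)
	{
		free_pages((unsigned long)demo_buf, demo_order);
		demo_buf = NULL;
	}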
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 66191fa35f63..ae09bb863497 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -72,8 +72,8 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
* lpfc_mem_alloc - create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
- * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
+ * Description: Creates and allocates PCI pools lpfc_mbuf_pool,
+ * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
* Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -89,36 +89,12 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
int i;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- /* Calculate alignment */
- if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
- i = phba->cfg_sg_dma_buf_size;
- else
- i = SLI4_PAGE_SIZE;
-
- phba->lpfc_sg_dma_buf_pool =
- dma_pool_create("lpfc_sg_dma_buf_pool",
- &phba->pcidev->dev,
- phba->cfg_sg_dma_buf_size,
- i, 0);
- if (!phba->lpfc_sg_dma_buf_pool)
- goto fail;
-
- } else {
- phba->lpfc_sg_dma_buf_pool =
- dma_pool_create("lpfc_sg_dma_buf_pool",
- &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
- align, 0);
-
- if (!phba->lpfc_sg_dma_buf_pool)
- goto fail;
- }
phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
LPFC_BPL_SIZE,
align, 0);
if (!phba->lpfc_mbuf_pool)
- goto fail_free_dma_buf_pool;
+ goto fail;
pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
sizeof(struct lpfc_dmabuf),
@@ -208,9 +184,6 @@ fail_free_drb_pool:
fail_free_lpfc_mbuf_pool:
dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
- fail_free_dma_buf_pool:
- dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
- phba->lpfc_sg_dma_buf_pool = NULL;
fail:
return -ENOMEM;
}
@@ -248,25 +221,22 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
- if (phba->lpfc_nvmet_drb_pool)
- dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
+ dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
phba->lpfc_nvmet_drb_pool = NULL;
- if (phba->lpfc_drb_pool)
- dma_pool_destroy(phba->lpfc_drb_pool);
+
+ dma_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
- if (phba->lpfc_hrb_pool)
- dma_pool_destroy(phba->lpfc_hrb_pool);
+
+ dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
- if (phba->txrdy_payload_pool)
- dma_pool_destroy(phba->txrdy_payload_pool);
+
+ dma_pool_destroy(phba->txrdy_payload_pool);
phba->txrdy_payload_pool = NULL;
- if (phba->lpfc_hbq_pool)
- dma_pool_destroy(phba->lpfc_hbq_pool);
+ dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
- if (phba->rrq_pool)
- mempool_destroy(phba->rrq_pool);
+ mempool_destroy(phba->rrq_pool);
phba->rrq_pool = NULL;
/* Free NLP memory pool */
@@ -290,10 +260,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
- /* Free DMA buffer memory pool */
- dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
- phba->lpfc_sg_dma_buf_pool = NULL;
-
/* Free Device Data memory pool */
if (phba->device_data_mem_pool) {
/* Ensure all objects have been returned to the pool */
@@ -366,6 +332,13 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
/* Free and destroy all the allocated memory pools */
lpfc_mem_free(phba);
+ /* Free DMA buffer memory pool */
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
+
+ dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+ phba->lpfc_cmd_rsp_buf_pool = NULL;
+
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
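The lpfc_mem_free() cleanup above drops the if (pool) guards because both destructors already tolerate a NULL pointer. A one-function sketch of that pattern:

	#include <linux/dmapool.h>
	#include <linux/mempool.h>

	static void demo_teardown(struct dma_pool *dpool, mempool_t *mpool)
	{
		dma_pool_destroy(dpool);	/* no-op when dpool is NULL */
		mempool_destroy(mpool);		/* no-op when mpool is NULL */
	}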
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 59252bfca14e..f4b879d25fe9 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -614,7 +614,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
out:
/* If we are authenticated, move to the proper state */
- if (ndlp->nlp_type & NLP_FCP_TARGET)
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
else
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -799,9 +799,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->writeXferRdyDis)
ndlp->nlp_flag |= NLP_FIRSTBURST;
}
- if (npr->Retry)
+ if (npr->Retry && ndlp->nlp_type &
+ (NLP_FCP_INITIATOR | NLP_FCP_TARGET))
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ if (npr->Retry && phba->nsler &&
+ ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
+ ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+
+
/* If this driver is in nvme target mode, set the ndlp's fc4
* type to NVME provided the PRLI response claims NVME FC4
* type. Target mode does not issue gft_id so doesn't get
@@ -885,7 +891,7 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1435 release_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
- "Data: %p\n",
+ "Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp);
@@ -1661,6 +1667,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1693,6 +1700,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
}
spin_unlock_irq(&phba->hbalock);
+ /* software abort if any GID_FT is outstanding */
+ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+ ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
+ lpfc_els_abort(phba, ns_ndlp);
+ }
+
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1814,7 +1828,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ if (lpfc_issue_els_prli(vport, ndlp, 0)) {
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
} else {
if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
phba->targetport->port_id = vport->fc_myDID;
@@ -2012,6 +2030,11 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;
+ if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+ ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+ else
+ ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+
/* Target driver cannot solicit NVME FB. */
if (bf_get_be32(prli_tgt, nvpr)) {
/* Complete the nvme target roles. The transport
@@ -2891,18 +2914,21 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
uint32_t got_ndlp = 0;
+ uint32_t data1;
if (lpfc_nlp_get(ndlp))
got_ndlp = 1;
cur_state = ndlp->nlp_state;
+ data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+ ((uint32_t)ndlp->nlp_type));
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0211 DSM in event x%x on NPort x%x in "
"state %d rpi x%x Data: x%x x%x\n",
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
- ndlp->nlp_flag, ndlp->nlp_fc4_type);
+ ndlp->nlp_flag, data1);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM in: evt:%d ste:%d did:x%x",
@@ -2913,10 +2939,13 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* DSM out state <rc> on NPort <nlp_DID> */
if (got_ndlp) {
+ data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+ ((uint32_t)ndlp->nlp_type));
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0212 DSM out state %d on NPort x%x "
- "rpi x%x Data: x%x\n",
- rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag);
+ "rpi x%x Data: x%x x%x\n",
+ rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
+ data1);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 946642cee3df..a227e36cbdc2 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6073 Binding %s HdwQueue %d (cpu %d) to "
- "hdw_queue %d qhandle %p\n", str,
+ "hdw_queue %d qhandle x%px\n", str,
qidx, qhandle->cpu_id, qhandle->index, qhandle);
*handle = (void *)qhandle;
return 0;
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
+ "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
lport, qidx, handle);
kfree(handle);
}
@@ -293,7 +293,7 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
struct lpfc_nvme_lport *lport = localport->private;
lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
- "6173 localport %p delete complete\n",
+ "6173 localport x%px delete complete\n",
lport);
/* release any threads waiting for the unreg to complete */
@@ -332,7 +332,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport %p\n",
+ "6146 remoteport delete of remoteport x%px\n",
remoteport);
spin_lock_irq(&vport->phba->hbalock);
@@ -383,8 +383,8 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
- "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
- "lsreg:%p bmp:%p ndlp:%p\n",
+ "Data %px DID %x Xri: %x status %x reason x%x "
+ "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
@@ -404,7 +404,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
else
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6046 nvme cmpl without done call back? "
- "Data %p DID %x Xri: %x status %x\n",
+ "Data %px DID %x Xri: %x status %x\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status);
if (ndlp) {
@@ -436,6 +436,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
return 1;
wqe = &genwqe->wqe;
+ /* Initialize only 64 bytes */
memset(wqe, 0, sizeof(union lpfc_wqe));
genwqe->context3 = (uint8_t *)bmp;
@@ -516,7 +517,8 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6050 Issue GEN REQ WQE to NPORT x%x "
- "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
+ "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
+ "xmit:%d 1st:%d\n",
ndlp->nlp_DID, genwqe->iotag,
vport->port_state,
genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
@@ -594,7 +596,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6051 Remoteport %p, rport has invalid ndlp. "
+ "6051 Remoteport x%px, rport has invalid ndlp. "
"Failing LS Req\n", pnvme_rport);
return -ENODEV;
}
@@ -646,10 +648,10 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
- "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
- ndlp->nlp_DID,
- pnvme_lport, pnvme_rport,
+ "6149 Issue LS Req to DID 0x%06x lport x%px, "
+ "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
+ "%pad %pad\n",
+ ndlp->nlp_DID, pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
@@ -665,8 +667,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (ret != WQE_SUCCESS) {
atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6052 EXIT. issue ls wqe failed lport %p, "
- "rport %p lsreq%p Status %x DID %x\n",
+ "6052 EXIT. issue ls wqe failed lport x%px, "
+ "rport x%px lsreq x%px Status %x DID %x\n",
pnvme_lport, pnvme_rport, pnvme_lsreq,
ret, ndlp->nlp_DID);
lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
@@ -723,7 +725,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
- "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
+ "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
"rsplen:%d %pad %pad\n",
pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
@@ -984,8 +986,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (!lpfc_ncmd->nvmeCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
- "nvmeCmd %p\n",
+ "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
+ "nvmeCmd x%px\n",
lpfc_ncmd, lpfc_ncmd->nvmeCmd);
/* Release the lpfc_ncmd regardless of the missing elements. */
@@ -998,9 +1000,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
- if (vport->localport) {
+ if (unlikely(status && vport->localport)) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
- if (lport && status) {
+ if (lport) {
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_fcp_xb);
atomic_inc(&lport->cmpl_fcp_err);
@@ -1100,8 +1102,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_IOERR,
- "6032 Delay Aborted cmd %p "
- "nvme cmd %p, xri x%x, "
+ "6032 Delay Aborted cmd x%px "
+ "nvme cmd x%px, xri x%x, "
"xb %d\n",
lpfc_ncmd, nCmd,
lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -1140,7 +1142,7 @@ out_err:
phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
lpfc_nvme_ktime(phba, lpfc_ncmd);
}
- if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
uint32_t cpu;
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
cpu = raw_smp_processor_id();
@@ -1253,6 +1255,9 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
sizeof(uint32_t) * 8);
cstat->control_requests++;
}
+
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -1304,14 +1309,16 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
struct scatterlist *data_sg;
struct sli4_sge *first_data_sgl;
struct ulp_bde64 *bde;
- dma_addr_t physaddr;
+ dma_addr_t physaddr = 0;
uint32_t num_bde = 0;
- uint32_t dma_len;
+ uint32_t dma_len = 0;
uint32_t dma_offset = 0;
- int nseg, i;
+ int nseg, i, j;
+ bool lsp_just_set = false;
/* Fix up the command and response DMA stuff. */
lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
@@ -1348,6 +1355,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
*/
nseg = nCmd->sg_cnt;
data_sg = nCmd->first_sgl;
+
+ /* for tracking the segment boundaries */
+ j = 2;
for (i = 0; i < nseg; i++) {
if (data_sg == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1356,23 +1366,76 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
lpfc_ncmd->seg_cnt = 0;
return 1;
}
- physaddr = data_sg->dma_address;
- dma_len = data_sg->length;
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((num_bde + 1) == nseg)
+
+ sgl->word2 = 0;
+ if ((num_bde + 1) == nseg) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ } else {
bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(dma_len);
-
- dma_offset += dma_len;
- data_sg = sg_next(data_sg);
- sgl++;
+
+ /* expand the segment */
+ if (!lsp_just_set &&
+ !((j + 1) % phba->border_sge_num) &&
+ ((nseg - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(
+ phba, lpfc_ncmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_ncmd->seg_cnt = 0;
+ return 1;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ }
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+ LPFC_SGE_TYPE_LSP)) {
+ if ((nseg - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+ physaddr = data_sg->dma_address;
+ dma_len = data_sg->length;
+ sgl->addr_lo = cpu_to_le32(
+ putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(
+ putPaddrHigh(physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ data_sg = sg_next(data_sg);
+
+ sgl++;
+
+ lsp_just_set = false;
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
}
if (phba->cfg_enable_pbde) {
/* Use PBDE support for first SGL only, offset == 0 */
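
The rewritten loop above is the core of the chained-SGL handling: whenever the walker would otherwise run past the end of the current SGL chunk (every border_sge_num entries) and data segments remain, it writes an LSP-type SGE pointing at a fresh per-hardware-queue chunk from lpfc_get_sgl_per_hdwq() and continues there, stepping the segment index back by one so no data segment is lost. A condensed sketch of that control flow; write_lsp_sge()/write_data_sge() are hypothetical helpers standing in for the bf_set()/cpu_to_le32() sequences in the hunk:

	/* Condensed sketch, not verbatim driver code. */
	for (i = 0, j = 2; i < nseg; i++, j++) {
		bool need_link = !lsp_just_set &&
				 !((j + 1) % phba->border_sge_num) &&
				 i != nseg - 1;

		if (need_link) {
			/* Link-SGE: point at a fresh per-hdwq SGL chunk. */
			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_ncmd);
			if (unlikely(!sgl_xtra))
				return 1;		/* out of chunks */
			write_lsp_sge(sgl, sgl_xtra->dma_phys_sgl,
				      phba->cfg_sg_dma_buf_size);
			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			i--;				/* redo this data segment */
			lsp_just_set = true;
		} else {
			/* Normal DATA SGE for the current segment. */
			write_data_sge(sgl, data_sg, dma_offset,
				       i == nseg - 1 /* last */);
			dma_offset += data_sg->length;
			data_sg = sg_next(data_sg);
			sgl++;
			lsp_just_set = false;
		}
	}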
@@ -1474,7 +1537,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
- if (vport->load_flag & FC_UNLOADING) {
+ if (unlikely(vport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -1505,8 +1568,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
- "6053 Fail IO, ndlp not ready: rport %p "
- "ndlp %p, DID x%06x\n",
+ "6053 Busy IO, ndlp not ready: rport x%px "
+ "ndlp x%px, DID x%06x\n",
rport, ndlp, pnvme_rport->port_id);
atomic_inc(&lport->xmt_fcp_err);
ret = -EBUSY;
@@ -1758,7 +1821,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6002 Abort Request to rport DID x%06x "
- "for nvme_fc_req %p\n",
+ "for nvme_fc_req x%px\n",
pnvme_rport->port_id,
pnvme_fcreq);
@@ -1767,7 +1830,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6139 Driver in reset cleanup - flushing "
@@ -1805,8 +1868,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6143 NVME req mismatch: "
- "lpfc_nbuf %p nvmeCmd %p, "
- "pnvme_fcreq %p. Skipping Abort xri x%x\n",
+ "lpfc_nbuf x%px nvmeCmd x%px, "
+ "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
lpfc_nbuf, lpfc_nbuf->nvmeCmd,
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1815,7 +1878,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Don't abort IOs no longer on the pending queue. */
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6142 NVME IO req %p not queued - skipping "
+ "6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1830,8 +1893,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6144 Outstanding NVME I/O Abort Request "
- "still pending on nvme_fcreq %p, "
- "lpfc_ncmd %p xri x%x\n",
+ "still pending on nvme_fcreq x%px, "
+ "lpfc_ncmd %px xri x%x\n",
pnvme_fcreq, lpfc_nbuf,
nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1841,7 +1904,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (!abts_buf) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6136 No available abort wqes. Skipping "
- "Abts req for nvme_fcreq %p xri x%x\n",
+ "Abts req for nvme_fcreq x%px xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
}
@@ -1855,7 +1918,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* WQEs are reused. Clear stale data and set key fields to
* zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
*/
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+ memset(abts_wqe, 0, sizeof(*abts_wqe));
bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
/* word 7 */
@@ -1892,7 +1955,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (ret_val) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6137 Failed abts issue_wqe with status x%x "
- "for nvme_fcreq %p.\n",
+ "for nvme_fcreq x%px.\n",
ret_val, pnvme_fcreq);
lpfc_sli_release_iocbq(phba, abts_buf);
return;
@@ -1982,7 +2045,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sgl->word2 = cpu_to_le32(sgl->word2);
/* Fill in word 3 / sgl_len during cmd submission */
- /* Initialize WQE */
+ /* Initialize 64 bytes only */
memset(wqe, 0, sizeof(union lpfc_wqe));
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
@@ -2028,11 +2091,11 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag);
- spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
list_add_tail(&lpfc_ncmd->list,
- &qp->lpfc_abts_nvme_buf_list);
+ &qp->lpfc_abts_io_buf_list);
qp->abts_nvme_io_bufs++;
- spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
@@ -2095,8 +2158,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
"6005 Successfully registered local "
- "NVME port num %d, localP %p, private %p, "
- "sg_seg %d\n",
+ "NVME port num %d, localP x%px, private "
+ "x%px, sg_seg %d\n",
localport->port_num, localport,
localport->private,
lpfc_nvme_template.max_sgl_segments);
@@ -2157,14 +2220,14 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (unlikely(!ret)) {
pending = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
if (pring->txcmplq_cnt)
pending += pring->txcmplq_cnt;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
- "6176 Lport %p Localport %p wait "
+ "6176 Lport x%px Localport x%px wait "
"timed out. Pending %d. Renewing.\n",
lport, vport->localport, pending);
continue;
@@ -2172,7 +2235,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
break;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6177 Lport %p Localport %p Complete Success\n",
+ "6177 Lport x%px Localport x%px Complete Success\n",
lport, vport->localport);
}
#endif
@@ -2203,7 +2266,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6011 Destroying NVME localport %p\n",
+ "6011 Destroying NVME localport x%px\n",
localport);
/* lport's rport list is clear. Unregister
@@ -2253,12 +2316,12 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
if (!lport) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
- "6171 Update NVME fail. localP %p, No lport\n",
+ "6171 Update NVME fail. localP x%px, No lport\n",
localport);
return;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6012 Update NVME lport %p did x%x\n",
+ "6012 Update NVME lport x%px did x%x\n",
localport, vport->fc_myDID);
localport->port_id = vport->fc_myDID;
@@ -2268,7 +2331,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6030 bound lport %p to DID x%06x\n",
+ "6030 bound lport x%px to DID x%06x\n",
lport, localport->port_id);
#endif
}
@@ -2317,9 +2380,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irq(&vport->phba->hbalock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
- spin_unlock_irq(&vport->phba->hbalock);
- if (!oldrport)
+ if (oldrport) {
+ prev_ndlp = oldrport->ndlp;
+ spin_unlock_irq(&vport->phba->hbalock);
+ } else {
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_nlp_get(ndlp);
+ }
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
@@ -2338,25 +2405,34 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* New remoteport record does not guarantee valid
* host private memory area.
*/
- prev_ndlp = oldrport->ndlp;
if (oldrport == remote_port->private) {
/* Same remoteport - ndlp should match.
* Just reuse.
*/
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "remoteport %p wwpn 0x%llx, "
- "Data: x%x x%x %p %p x%x x%06x\n",
+ "6014 Rebind lport to current "
+ "remoteport x%px wwpn 0x%llx, "
+ "Data: x%x x%x x%px x%px x%x "
+ " x%06x\n",
remote_port,
remote_port->port_name,
remote_port->port_id,
remote_port->port_role,
- prev_ndlp,
+ oldrport->ndlp,
ndlp,
ndlp->nlp_type,
ndlp->nlp_DID);
- return 0;
+
+ /* It's a complete rebind only if the driver
+ * is registering with the same ndlp. Otherwise
+ * the driver likely executed a node swap
+ * prior to this registration and the ndlp to
+ * remoteport binding needs to be redone.
+ */
+ if (prev_ndlp == ndlp)
+ return 0;
+
}
/* Sever the ndlp<->rport association
@@ -2390,10 +2466,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
- "6022 Binding new rport to "
- "lport %p Remoteport %p rport %p WWNN 0x%llx, "
+ "6022 Bind lport x%px to remoteport x%px "
+ "rport x%px WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
+ "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
lport, remote_port, rport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role,
@@ -2423,20 +2499,23 @@ void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
- struct lpfc_nvme_rport *rport;
- struct nvme_fc_remote_port *remoteport;
+ struct lpfc_nvme_rport *nrport;
+ struct nvme_fc_remote_port *remoteport = NULL;
- rport = ndlp->nrport;
+ spin_lock_irq(&vport->phba->hbalock);
+ nrport = lpfc_ndlp_get_nrport(ndlp);
+ if (nrport)
+ remoteport = nrport->remoteport;
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6170 Rescan NPort DID x%06x type x%x "
- "state x%x rport %p\n",
- ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, rport);
- if (!rport)
- goto input_err;
- remoteport = rport->remoteport;
- if (!remoteport)
- goto input_err;
+ "state x%x nrport x%px remoteport x%px\n",
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
+ nrport, remoteport);
+
+ if (!nrport || !remoteport)
+ goto rescan_exit;
/* Only rescan if we are an NVME target in the MAPPED state */
if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
@@ -2449,10 +2528,10 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, remoteport->port_state);
}
return;
-input_err:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6169 State error: lport %p, rport%p FCID x%06x\n",
- vport->localport, ndlp->rport, ndlp->nlp_DID);
+ rescan_exit:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6169 Skip NVME Rport Rescan, NVME remoteport "
+ "unregistered\n");
#endif
}
@@ -2499,7 +2578,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
goto input_err;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6033 Unreg nvme remoteport %p, portname x%llx, "
+ "6033 Unreg nvme remoteport x%px, portname x%llx, "
"port_id x%06x, portstate x%x port type x%x\n",
remoteport, remoteport->port_name,
remoteport->port_id, remoteport->port_state,
@@ -2537,7 +2616,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
input_err:
#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6168 State error: lport %p, rport%p FCID x%06x\n",
+ "6168 State error: lport x%px, rport x%px FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}
@@ -2545,6 +2624,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
+ * @lpfc_ncmd: The nvme job structure for the request being aborted.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* NVME aborted xri. Aborted NVME IO commands are completed to the transport
@@ -2552,59 +2632,33 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx)
+ struct sli4_wcqe_xri_aborted *axri,
+ struct lpfc_io_buf *lpfc_ncmd)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
struct nvmefc_fcp_req *nvme_cmd = NULL;
- struct lpfc_nodelist *ndlp;
- struct lpfc_sli4_hdw_queue *qp;
- unsigned long iflag = 0;
+ struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return;
- qp = &phba->sli4_hba.hdwq[idx];
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&qp->abts_nvme_buf_list_lock);
- list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
- &qp->lpfc_abts_nvme_buf_list, list) {
- if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
- list_del_init(&lpfc_ncmd->list);
- qp->abts_nvme_io_bufs--;
- lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- lpfc_ncmd->status = IOSTAT_SUCCESS;
- spin_unlock(&qp->abts_nvme_buf_list_lock);
-
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- ndlp = lpfc_ncmd->ndlp;
- if (ndlp)
- lpfc_sli4_abts_err_handler(phba, ndlp, axri);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6311 nvme_cmd %p xri x%x tag x%x "
- "abort complete and xri released\n",
- lpfc_ncmd->nvmeCmd, xri,
- lpfc_ncmd->cur_iocbq.iotag);
-
- /* Aborted NVME commands are required to not complete
- * before the abort exchange command fully completes.
- * Once completed, it is available via the put list.
- */
- if (lpfc_ncmd->nvmeCmd) {
- nvme_cmd = lpfc_ncmd->nvmeCmd;
- nvme_cmd->done(nvme_cmd);
- lpfc_ncmd->nvmeCmd = NULL;
- }
- lpfc_release_nvme_buf(phba, lpfc_ncmd);
- return;
- }
- }
- spin_unlock(&qp->abts_nvme_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6312 XRI Aborted xri x%x not found\n", xri);
+ if (ndlp)
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
+ "xri released\n",
+ lpfc_ncmd->nvmeCmd, xri,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands are required to not complete
+ * before the abort exchange command fully completes.
+ * Once completed, it is available via the put list.
+ */
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
@@ -2626,13 +2680,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return;
- /* Cycle through all NVME rings and make sure all outstanding
+ /* Cycle through all IO rings and make sure all outstanding
* WQEs have been removed from the txcmplqs.
*/
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- if (!phba->sli4_hba.hdwq[i].nvme_wq)
+ if (!phba->sli4_hba.hdwq[i].io_wq)
continue;
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
@@ -2653,3 +2707,50 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
}
}
}
+
+void
+lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct nvmefc_fcp_req *nCmd;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
+
+ if (!pwqeIn->context1) {
+ lpfc_sli_release_iocbq(phba, pwqeIn);
+ return;
+ }
+ /* For abort iocb just return, IO iocb will do a done call */
+ if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
+ CMD_ABORT_XRI_CX) {
+ lpfc_sli_release_iocbq(phba, pwqeIn);
+ return;
+ }
+ lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
+
+ spin_lock(&lpfc_ncmd->buf_lock);
+ if (!lpfc_ncmd->nvmeCmd) {
+ spin_unlock(&lpfc_ncmd->buf_lock);
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ return;
+ }
+
+ nCmd = lpfc_ncmd->nvmeCmd;
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6194 NVME Cancel xri %x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag);
+
+ nCmd->transferred_length = 0;
+ nCmd->rcv_rsplen = 0;
+ nCmd->status = NVME_SC_INTERNAL;
+ freqpriv = nCmd->private;
+ freqpriv->nvme_buf = NULL;
+ lpfc_ncmd->nvmeCmd = NULL;
+
+ spin_unlock(&lpfc_ncmd->buf_lock);
+ nCmd->done(nCmd);
+
+ /* Call release with XB=1 to queue the IO into the abort list. */
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+#endif
+}
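
lpfc_nvme_cancel_iocb() gives the SLI layer a way to fail an in-flight NVME command back to the transport (zeroed lengths, NVME_SC_INTERNAL) when its WQE is being flushed rather than completed by hardware; the actual call site lives in lpfc_sli.c and is not part of this hunk. A hedged sketch of the kind of caller this implies (fail_fcp_iocb() is a stand-in, not a real driver function):

	/* Hypothetical flush loop, for illustration only. */
	struct lpfc_iocbq *piocb, *next;

	list_for_each_entry_safe(piocb, next, &flush_list, list) {
		list_del_init(&piocb->list);
		if (piocb->iocb_flag & LPFC_IO_NVME)
			lpfc_nvme_cancel_iocb(phba, piocb);	/* complete to NVME transport */
		else
			fail_fcp_iocb(phba, piocb);		/* stand-in for the FCP path */
	}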
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index faa596f9e861..9884228800a5 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1026,7 +1026,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
- wq = ctxp->hdwq->nvme_wq;
+ wq = ctxp->hdwq->io_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1104,7 +1104,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
- wq = ctxp->hdwq->nvme_wq;
+ wq = ctxp->hdwq->io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
}
@@ -1437,7 +1437,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
infop = lpfc_get_ctx_list(phba, i, j);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
"6408 TOTAL NVMET ctx for CPU %d "
- "MRQ %d: cnt %d nextcpu %p\n",
+ "MRQ %d: cnt %d nextcpu x%px\n",
i, j, infop->nvmet_ctx_list_cnt,
infop->nvmet_ctx_next_cpu);
}
@@ -1500,7 +1500,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6026 Registered NVME "
- "targetport: %p, private %p "
+ "targetport: x%px, private x%px "
"portnm %llx nodenm %llx segs %d qs %d\n",
phba->targetport, tgtp,
pinfo.port_name, pinfo.node_name,
@@ -1555,7 +1555,7 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
return 0;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6007 Update NVMET port %p did x%x\n",
+ "6007 Update NVMET port x%px did x%x\n",
phba->targetport, vport->fc_myDID);
phba->targetport->port_id = vport->fc_myDID;
@@ -1790,12 +1790,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_nvmet_defer_release(phba, ctxp);
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
- if (ctxp->state == LPFC_NVMET_STE_RCV)
- lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
- else
- lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
@@ -1922,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
+ wq = phba->sli4_hba.hdwq[qidx].io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
@@ -1930,7 +1926,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
- "6179 Unreg targetport %p timeout "
+ "6179 Unreg targetport x%px timeout "
"reached.\n", phba->targetport);
lpfc_nvmet_cleanup_io_context(phba);
}
@@ -3113,7 +3109,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_abort_cmpl);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
+ "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
@@ -3299,7 +3295,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
@@ -3334,7 +3330,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* WQEs are reused. Clear stale data and set key fields to
* zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
*/
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+ memset(abts_wqe, 0, sizeof(*abts_wqe));
/* word 3 */
bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f9df800e7067..fe1097666de4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -53,8 +53,6 @@
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
-int _dump_buf_done = 1;
-
static char *dif_op_str[] = {
"PROT_NORMAL",
"PROT_READ_INSERT",
@@ -89,63 +87,6 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static void
-lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
-{
- void *src, *dst;
- struct scatterlist *sgde = scsi_sglist(cmnd);
-
- if (!_dump_buf_data) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
- __func__);
- return;
- }
-
-
- if (!sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9051 BLKGRD: ERROR: data scatterlist is null\n");
- return;
- }
-
- dst = (void *) _dump_buf_data;
- while (sgde) {
- src = sg_virt(sgde);
- memcpy(dst, src, sgde->length);
- dst += sgde->length;
- sgde = sg_next(sgde);
- }
-}
-
-static void
-lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
-{
- void *src, *dst;
- struct scatterlist *sgde = scsi_prot_sglist(cmnd);
-
- if (!_dump_buf_dif) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
- __func__);
- return;
- }
-
- if (!sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9053 BLKGRD: ERROR: prot scatterlist is null\n");
- return;
- }
-
- dst = _dump_buf_dif;
- while (sgde) {
- src = sg_virt(sgde);
- memcpy(dst, src, sgde->length);
- dst += sgde->length;
- sgde = sg_next(sgde);
- }
-}
-
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
@@ -537,29 +478,32 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- spin_lock(&qp->abts_scsi_buf_list_lock);
+ spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
- &qp->lpfc_abts_scsi_buf_list, list) {
+ &qp->lpfc_abts_io_buf_list, list) {
+ if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
+ continue;
+
if (psb->rdata && psb->rdata->pnode &&
psb->rdata->pnode->vport == vport)
psb->rdata = NULL;
}
- spin_unlock(&qp->abts_scsi_buf_list_lock);
+ spin_unlock(&qp->abts_io_buf_list_lock);
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
- * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * FCP or NVME aborted xri.
**/
void
-lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx)
+lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri, int idx)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
@@ -577,16 +521,25 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&qp->abts_scsi_buf_list_lock);
+ spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
- &qp->lpfc_abts_scsi_buf_list, list) {
+ &qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
- list_del(&psb->list);
- qp->abts_scsi_io_bufs--;
+ list_del_init(&psb->list);
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
- spin_unlock(
- &qp->abts_scsi_buf_list_lock);
+#ifdef BUILD_NVME
+ if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
+ qp->abts_nvme_io_bufs--;
+ spin_unlock(&qp->abts_io_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
+ return;
+ }
+#endif
+ qp->abts_scsi_io_bufs--;
+ spin_unlock(&qp->abts_io_buf_list_lock);
+
if (psb->rdata && psb->rdata->pnode)
ndlp = psb->rdata->pnode;
else
@@ -605,12 +558,12 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
return;
}
}
- spin_unlock(&qp->abts_scsi_buf_list_lock);
+ spin_unlock(&qp->abts_io_buf_list_lock);
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ (iocbq->iocb_flag & LPFC_IO_LIBDFC))
continue;
if (iocbq->sli4_xritag != xri)
continue;
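
With the per-protocol abort lists merged into a single lpfc_abts_io_buf_list per hardware queue, the XRI-abort completion handler first identifies which protocol owns the matching buffer and only then hands it to the matching finish routine (lpfc_sli4_nvme_xri_aborted(), seen earlier in this diff, for NVME; the inline FCP handling otherwise). A condensed view of that dispatch, locking and error detail omitted:

	list_for_each_entry_safe(psb, next_psb,
				 &qp->lpfc_abts_io_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag != xri)
			continue;
		list_del_init(&psb->list);
		psb->exch_busy = 0;
		psb->status = IOSTAT_SUCCESS;
		if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
			qp->abts_nvme_io_bufs--;
			lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
		} else {
			qp->abts_scsi_io_bufs--;
			/* notify the rport error handler, then release psb */
		}
		return;
	}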
@@ -685,8 +638,9 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
- uint32_t sgl_size, cpu, idx;
+ uint32_t cpu, idx;
int tag;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
cpu = raw_smp_processor_id();
if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
@@ -704,9 +658,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
return NULL;
}
- sgl_size = phba->cfg_sg_dma_buf_size -
- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
-
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
@@ -721,9 +672,12 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
lpfc_cmd->prot_data_type = 0;
#endif
- lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
- lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
- sizeof(struct fcp_cmnd));
+ tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
+ if (!tmp)
+ return NULL;
+
+ lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
+ lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
/*
* The first two SGEs are the FCP_CMD and FCP_RSP.
@@ -731,7 +685,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* first two and leave the rest for queuecommand.
*/
sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size);
+ pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
sgl->word2 = le32_to_cpu(sgl->word2);
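
Rather than carving the FCP_CMND/FCP_RSP pair out of the tail of the data buffer (the old sgl_size arithmetic), the command now gets that pair from a per-hardware-queue pool via lpfc_get_cmd_rsp_buf_per_hdwq(), and the first two SGEs point into that separate DMA region. A sketch of the assumed buffer layout; the field names match the hunk, but the struct definition itself is not shown in this diff:

	/* Assumed shape; illustrative only. */
	struct fcp_cmd_rsp_buf {
		struct list_head list_node;
		struct fcp_cmnd *fcp_cmnd;		/* virtual addr of FCP_CMND */
		struct fcp_rsp  *fcp_rsp;		/* FCP_RSP follows the cmnd */
		dma_addr_t fcp_cmd_rsp_dma_handle;	/* bus addr of FCP_CMND */
	};

	/* SGE 0 -> FCP_CMND; SGE 1 -> FCP_RSP at a fixed offset (assumed). */
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);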
@@ -835,11 +789,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
qp = psb->hdwq;
if (psb->exch_busy) {
- spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL;
- list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list);
+ list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++;
- spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else {
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
}
@@ -918,9 +872,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
- return 1;
+ return 2;
}
/*
@@ -1774,7 +1729,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (!sgpe || !sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ "9020 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
}
@@ -1989,7 +1944,8 @@ out:
**/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
- struct sli4_sge *sgl, int datasegcnt)
+ struct sli4_sge *sgl, int datasegcnt,
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct sli4_sge_diseed *diseed = NULL;
@@ -2003,6 +1959,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t checking = 1;
uint32_t dma_len;
uint32_t dma_offset = 0;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
+ int j;
+ bool lsp_just_set = false;
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
if (status)
@@ -2062,23 +2021,64 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgl++;
/* assumption: caller has already run dma_map_sg on command data */
- scsi_for_each_sg(sc, sgde, datasegcnt, i) {
- physaddr = sg_dma_address(sgde);
- dma_len = sg_dma_len(sgde);
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- if ((i + 1) == datasegcnt)
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ sgde = scsi_sglist(sc);
+ j = 3;
+ for (i = 0; i < datasegcnt; i++) {
+ /* clear it */
+ sgl->word2 = 0;
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
+ /* do we need to expand the segment */
+ if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
+ ((datasegcnt - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_cmd->seg_cnt = 0;
+ return 0;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
+ if ((datasegcnt - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ physaddr = sg_dma_address(sgde);
+ dma_len = sg_dma_len(sgde);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ sgde = sg_next(sgde);
+
+ sgl++;
+ num_sge++;
+ lsp_just_set = false;
+
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
- sgl++;
- num_sge++;
}
out:
@@ -2124,7 +2124,8 @@ out:
**/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
- struct sli4_sge *sgl, int datacnt, int protcnt)
+ struct sli4_sge *sgl, int datacnt, int protcnt,
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct scatterlist *sgpe = NULL; /* s/g prot entry */
@@ -2146,14 +2147,15 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
#endif
uint32_t checking = 1;
uint32_t dma_offset = 0;
- int num_sge = 0;
+ int num_sge = 0, j = 2;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
sgpe = scsi_prot_sglist(sc);
sgde = scsi_sglist(sc);
if (!sgpe || !sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ "9082 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
}
@@ -2179,9 +2181,37 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
split_offset = 0;
do {
/* Check to see if we ran out of space */
- if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+ if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
+ !(phba->cfg_xpsgl))
return num_sge + 3;
+ /* DISEED and DIF have to be together */
+ if (!((j + 1) % phba->border_sge_num) ||
+ !((j + 2) % phba->border_sge_num) ||
+ !((j + 3) % phba->border_sge_num)) {
+ sgl->word2 = 0;
+
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ goto out;
+ } else {
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+ }
+
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ j = 0;
+ }
+
/* setup DISEED with what we have */
diseed = (struct sli4_sge_diseed *) sgl;
memset(diseed, 0, sizeof(struct sli4_sge_diseed));
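
A DISEED SGE and the DIF SGE it governs must not be split across a chained-SGL boundary, so the check above looks up to three slots ahead of the current position; if any of them would land on the chunk's link slot, the LSP link is written first and the protection group restarts at the top of the new chunk (j is reset to 0). A tiny arithmetic illustration with a made-up border value:

	/* Illustration only; the real border_sge_num is computed by the driver. */
	enum { BORDER = 64 };
	int j = 61;				/* next free SGE slot in this chunk */
	int link_first = !((j + 1) % BORDER) ||
			 !((j + 2) % BORDER) ||
			 !((j + 3) % BORDER);	/* 61 + 3 == 64 -> true, link now */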
@@ -2228,7 +2258,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* advance sgl and increment bde count */
num_sge++;
+
sgl++;
+ j++;
/* setup the first BDE that points to protection buffer */
protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
@@ -2243,6 +2275,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = 0;
protgrp_blks = protgroup_len / 8;
protgrp_bytes = protgrp_blks * blksize;
@@ -2263,9 +2296,14 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* setup SGE's for data blocks associated with DIF data */
pgdone = 0;
subtotal = 0; /* total bytes processed for current prot grp */
+
+ sgl++;
+ j++;
+
while (!pgdone) {
/* Check to see if we ran out of space */
- if (num_sge >= phba->cfg_total_seg_cnt)
+ if ((num_sge >= phba->cfg_total_seg_cnt) &&
+ !phba->cfg_xpsgl)
return num_sge + 1;
if (!sgde) {
@@ -2274,60 +2312,101 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
__func__);
return 0;
}
- sgl++;
- dataphysaddr = sg_dma_address(sgde) + split_offset;
- remainder = sg_dma_len(sgde) - split_offset;
+ if (!((j + 1) % phba->border_sge_num)) {
+ sgl->word2 = 0;
- if ((subtotal + remainder) <= protgrp_bytes) {
- /* we can use this whole buffer */
- dma_len = remainder;
- split_offset = 0;
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
- if ((subtotal + remainder) == protgrp_bytes)
- pgdone = 1;
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
+ lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ goto out;
+ } else {
+ sgl->addr_lo = cpu_to_le32(
+ putPaddrLow(sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(
+ putPaddrHigh(sgl_xtra->dma_phys_sgl));
+ }
+
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
} else {
- /* must split this buffer with next prot grp */
- dma_len = protgrp_bytes - subtotal;
- split_offset += dma_len;
- }
+ dataphysaddr = sg_dma_address(sgde) +
+ split_offset;
- subtotal += dma_len;
+ remainder = sg_dma_len(sgde) - split_offset;
- sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ if ((subtotal + remainder) <= protgrp_bytes) {
+ /* we can use this whole buffer */
+ dma_len = remainder;
+ split_offset = 0;
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
+ if ((subtotal + remainder) ==
+ protgrp_bytes)
+ pgdone = 1;
+ } else {
+ /* must split this buffer with next
+ * prot grp
+ */
+ dma_len = protgrp_bytes - subtotal;
+ split_offset += dma_len;
+ }
- num_sge++;
- curr_data++;
+ subtotal += dma_len;
- if (split_offset)
- break;
+ sgl->word2 = 0;
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ dataphysaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ dataphysaddr));
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
- /* Move to the next s/g segment if possible */
- sgde = sg_next(sgde);
+ sgl->sge_len = cpu_to_le32(dma_len);
+ dma_offset += dma_len;
+
+ num_sge++;
+ curr_data++;
+
+ if (split_offset) {
+ sgl++;
+ j++;
+ break;
+ }
+
+ /* Move to the next s/g segment if possible */
+ sgde = sg_next(sgde);
+
+ sgl++;
+ }
+
+ j++;
}
if (protgroup_offset) {
/* update the reference tag */
reftag += protgrp_blks;
- sgl++;
continue;
}
/* are we done ? */
if (curr_prot == protcnt) {
+ /* mark the last SGL */
+ sgl--;
bf_set(lpfc_sli4_sge_last, sgl, 1);
alldone = 1;
} else if (curr_prot < protcnt) {
/* advance to next prot buffer */
sgpe = sg_next(sgpe);
- sgl++;
/* update the reference tag */
reftag += protgrp_blks;
@@ -2430,7 +2509,10 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
*
* This is the protection/DIF aware version of
* lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
- * two functions eventually, but for now, it's here
+ * two functions eventually, but for now, it's here.
+ * RETURNS 0 - SUCCESS,
+ * 1 - Failed DMA map, retry.
+ * 2 - Invalid scsi cmd or prot-type. Do not retry.
**/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
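
The DMA-prep routines now distinguish a transient failure (1: the mapping did not fit, worth retrying) from a structural one (2: bad command or protection setup, retrying cannot help); the tail of this diff shows the err == 2 arm in lpfc_queuecommand. A hedged sketch of the assumed caller-side split (the retry arm is outside the lines shown here):

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err == 2) {
		/* Hard failure: fail the command back to the midlayer. */
		cmnd->result = DID_ERROR << 16;
	} else if (err) {
		/* Transient failure: presumably requeue, e.g. return
		 * SCSI_MLQUEUE_HOST_BUSY so the midlayer retries.
		 */
	}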
@@ -2444,6 +2526,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ int ret = 1;
struct lpfc_vport *vport = phba->pport;
/*
@@ -2467,8 +2550,11 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
lpfc_cmd->seg_cnt = datasegcnt;
/* First check if data segment count from SCSI Layer is good */
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
+ ret = 2;
goto err;
+ }
prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
@@ -2476,14 +2562,18 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
case LPFC_PG_TYPE_NO_DIF:
/* Here we need to add a PDE5 and PDE6 to the count */
- if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+ if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
+ ret = 2;
goto err;
+ }
num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
datasegcnt);
/* we should have 2 or more entries in buffer list */
- if (num_bde < 2)
+ if (num_bde < 2) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_DIF_BUF:
@@ -2507,15 +2597,19 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
* protection data segment.
*/
if ((lpfc_cmd->prot_seg_cnt * 4) >
- (phba->cfg_total_seg_cnt - 2))
+ (phba->cfg_total_seg_cnt - 2)) {
+ ret = 2;
goto err;
+ }
num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
datasegcnt, protsegcnt);
/* we should have 3 or more entries in buffer list */
if ((num_bde < 3) ||
- (num_bde > phba->cfg_total_seg_cnt))
+ (num_bde > phba->cfg_total_seg_cnt)) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_INVALID:
@@ -2526,7 +2620,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"9022 Unexpected protection group %i\n",
prot_group_type);
- return 1;
+ return 2;
}
}
@@ -2576,7 +2670,7 @@ err:
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->prot_seg_cnt = 0;
- return 1;
+ return ret;
}
/*
@@ -2809,26 +2903,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
uint32_t bgstat = bgf->bgstat;
uint64_t failing_sector = 0;
- spin_lock(&_dump_buf_lock);
- if (!_dump_buf_done) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
- " Data for %u blocks to debugfs\n",
- (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
- lpfc_debug_save_data(phba, cmd);
-
- /* If we have a prot sgl, save the DIF buffer */
- if (lpfc_prot_group_type(phba, cmd) ==
- LPFC_PG_TYPE_DIF_BUF) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
- "Saving DIF for %u blocks to debugfs\n",
- (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
- lpfc_debug_save_dif(phba, cmd);
- }
-
- _dump_buf_done = 1;
- }
- spin_unlock(&_dump_buf_lock);
-
if (lpfc_bgs_get_invalid_prof(bgstat)) {
cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2962,7 +3036,8 @@ out:
* field of @lpfc_cmd for device with SLI-4 interface spec.
*
* Return codes:
- * 1 - Error
+ * 2 - Error - Do not retry
+ * 1 - Error - Retry
* 0 - Success
**/
static int
@@ -2978,8 +3053,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
- int nseg;
+ int nseg, i, j;
struct ulp_bde64 *bde;
+ bool lsp_just_set = false;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -3006,15 +3083,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
sgl += 1;
first_data_sgl = sgl;
lpfc_cmd->seg_cnt = nseg;
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ if (!phba->cfg_xpsgl &&
+ lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
" %s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
- return 1;
+ return 2;
}
/*
@@ -3026,22 +3105,80 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* the IOCB. If it can't then the BDEs get added to a BPL as it
* does for SLI-2 mode.
*/
- scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
- physaddr = sg_dma_address(sgel);
- dma_len = sg_dma_len(sgel);
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((num_bde + 1) == nseg)
+
+ /* for tracking segment boundaries */
+ sgel = scsi_sglist(scsi_cmnd);
+ j = 2;
+ for (i = 0; i < nseg; i++) {
+ sgl->word2 = 0;
+ if ((num_bde + 1) == nseg) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ } else {
bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
- sgl++;
+
+ /* do we need to expand the segment */
+ if (!lsp_just_set &&
+ !((j + 1) % phba->border_sge_num) &&
+ ((nseg - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(
+ phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_cmd->seg_cnt = 0;
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ }
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+ LPFC_SGE_TYPE_LSP)) {
+ if ((nseg - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+ physaddr = sg_dma_address(sgel);
+ dma_len = sg_dma_len(sgel);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ sgel = sg_next(sgel);
+
+ sgl++;
+ lsp_just_set = false;
+
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
}
/*
* Setup the first Payload BDE. For FCoE we just key off
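Note: the loop in the hunk above is the core of the new expandable-SGL (xpsgl) path. When the next entry would land on the page boundary (phba->border_sge_num) and more scatterlist segments remain, the current SGE is turned into an LSP (link segment pointer) entry that points at an extra chunk obtained from the per-hdwq pool via lpfc_get_sgl_per_hdwq(), and the segment index is rewound so the data segment is written into the fresh chunk instead. A minimal, self-contained userspace sketch of the same chaining idea follows; the names, sizes and addresses are illustrative, not driver API.

/* Sketch: fixed-size descriptor pages chained through a "link" slot,
 * modelling the LSP-based SGL expansion (illustrative only).
 */
#include <stdio.h>
#include <stdlib.h>

#define SLOTS_PER_PAGE 4		/* stand-in for border_sge_num */

struct slot {
	unsigned long addr;		/* data address, or pointer to next page */
	int is_link;
};

struct sgl_page {
	struct slot slots[SLOTS_PER_PAGE];
	struct sgl_page *next;		/* bookkeeping for the final free loop */
};

int main(void)
{
	unsigned long segs[10];
	int nseg = 10, i, idx = 0;
	struct sgl_page *head, *cur, *next;

	for (i = 0; i < nseg; i++)
		segs[i] = 0x1000UL * (i + 1);	/* fake DMA addresses */

	head = cur = calloc(1, sizeof(*head));
	if (!head)
		return 1;

	for (i = 0; i < nseg; i++) {
		/* The last slot becomes a link when more segments remain,
		 * mirroring the LPFC_SGE_TYPE_LSP entry in the driver.
		 */
		if (idx == SLOTS_PER_PAGE - 1 && i != nseg - 1) {
			next = calloc(1, sizeof(*next));
			if (!next)
				break;		/* the driver fails the IO here */
			cur->slots[idx].is_link = 1;
			cur->slots[idx].addr = (unsigned long)next;
			cur->next = next;
			cur = next;
			idx = 0;		/* data continues in the new page */
		}
		cur->slots[idx].addr = segs[i];
		cur->slots[idx].is_link = 0;
		idx++;
	}

	/* Walk the chain, print it, then free the pages. */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		for (idx = 0; idx < SLOTS_PER_PAGE && cur->slots[idx].addr; idx++)
			printf("%s 0x%lx\n",
			       cur->slots[idx].is_link ? "link" : "data",
			       cur->slots[idx].addr);
		free(cur);
	}
	return 0;
}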
@@ -3110,6 +3247,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* This is the protection/DIF aware version of
* lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
* two functions eventually, but for now, it's here
+ * Return codes:
+ * 2 - Error - Do not retry
+ * 1 - Error - Retry
+ * 0 - Success
**/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
@@ -3123,6 +3264,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ int ret = 1;
struct lpfc_vport *vport = phba->pport;
/*
@@ -3152,23 +3294,33 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
lpfc_cmd->seg_cnt = datasegcnt;
/* First check if data segment count from SCSI Layer is good */
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
+ !phba->cfg_xpsgl) {
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
+ ret = 2;
goto err;
+ }
prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
switch (prot_group_type) {
case LPFC_PG_TYPE_NO_DIF:
/* Here we need to add a DISEED to the count */
- if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+ if (((lpfc_cmd->seg_cnt + 1) >
+ phba->cfg_total_seg_cnt) &&
+ !phba->cfg_xpsgl) {
+ ret = 2;
goto err;
+ }
num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
- datasegcnt);
+ datasegcnt, lpfc_cmd);
/* we should have 2 or more entries in buffer list */
- if (num_sge < 2)
+ if (num_sge < 2) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_DIF_BUF:
@@ -3190,17 +3342,23 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
			 * There is a minimum of 3 SGEs used for every
* protection data segment.
*/
- if ((lpfc_cmd->prot_seg_cnt * 3) >
- (phba->cfg_total_seg_cnt - 2))
+ if (((lpfc_cmd->prot_seg_cnt * 3) >
+ (phba->cfg_total_seg_cnt - 2)) &&
+ !phba->cfg_xpsgl) {
+ ret = 2;
goto err;
+ }
num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
- datasegcnt, protsegcnt);
+ datasegcnt, protsegcnt, lpfc_cmd);
/* we should have 3 or more entries in buffer list */
- if ((num_sge < 3) ||
- (num_sge > phba->cfg_total_seg_cnt))
+ if (num_sge < 3 ||
+ (num_sge > phba->cfg_total_seg_cnt &&
+ !phba->cfg_xpsgl)) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_INVALID:
@@ -3211,7 +3369,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"9083 Unexpected protection group %i\n",
prot_group_type);
- return 1;
+ return 2;
}
}
@@ -3273,7 +3431,7 @@ err:
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->prot_seg_cnt = 0;
- return 1;
+ return ret;
}
/**
@@ -3839,7 +3997,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0710 Iodone <%d/%llu> cmd %p, error "
+ "0710 Iodone <%d/%llu> cmd x%px, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3), cmd->retries,
@@ -4454,8 +4612,12 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
- if (err)
+ if (err == 2) {
+ cmnd->result = DID_ERROR << 16;
+ goto out_fail_command_release_buf;
+ } else if (err) {
goto out_host_busy_free_buf;
+ }
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
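Note: with the prep routines now returning 2 for unrecoverable failures and 1 for transient ones, lpfc_queuecommand() fails the command outright with DID_ERROR (through the new out_fail_command_release_buf label) in the first case, and returns SCSI_MLQUEUE_HOST_BUSY so the midlayer retries in the second. A compact sketch of that dispatch is below; the enum and strings are illustrative, not driver symbols.

/* Sketch: mapping the tri-state DMA-prep result onto a queuecommand outcome
 * (illustrative names; not driver API).
 */
#include <stdio.h>

enum prep_result { PREP_OK = 0, PREP_RETRY = 1, PREP_FATAL = 2 };

static const char *dispatch(enum prep_result err)
{
	switch (err) {
	case PREP_FATAL:
		/* Unrecoverable (e.g. too many segments): release the IO buf
		 * and complete the command with DID_ERROR.
		 */
		return "fail command with DID_ERROR";
	case PREP_RETRY:
		/* Transient shortage: let the midlayer requeue the command. */
		return "return SCSI_MLQUEUE_HOST_BUSY";
	default:
		return "issue the IO";
	}
}

int main(void)
{
	enum prep_result r;

	for (r = PREP_OK; r <= PREP_FATAL; r++)
		printf("prep=%d -> %s\n", r, dispatch(r));
	return 0;
}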
@@ -4526,6 +4688,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
out_tgt_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
+ out_fail_command_release_buf:
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+
out_fail_command:
cmnd->scsi_done(cmnd);
return 0;
@@ -4568,7 +4733,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
@@ -4589,7 +4754,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
+ pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
goto out_unlock_buf;
@@ -4956,7 +5121,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0797 Tgt Map rport failure: rdata x%p\n", rdata);
+ "0797 Tgt Map rport failure: rdata x%px\n", rdata);
return FAILED;
}
pnode = rdata->pnode;
@@ -5054,7 +5219,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0798 Device Reset rdata failure: rdata x%p\n",
+ "0798 Device Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
}
@@ -5066,7 +5231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0721 Device Reset rport failure: rdata x%p\n", rdata);
+ "0721 Device Reset rport failure: rdata x%px\n", rdata);
return FAILED;
}
@@ -5125,7 +5290,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0799 Target Reset rdata failure: rdata x%p\n",
+ "0799 Target Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
}
@@ -5137,7 +5302,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0722 Target Reset rport failure: rdata x%p\n", rdata);
+ "0722 Target Reset rport failure: rdata x%px\n", rdata);
if (pnode) {
spin_lock_irq(shost->host_lock);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
@@ -5295,18 +5460,20 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
lpfc_offline(phba);
rc = lpfc_sli_brdrestart(phba);
if (rc)
- ret = FAILED;
+ goto error;
+
rc = lpfc_online(phba);
if (rc)
- ret = FAILED;
+ goto error;
+
lpfc_unblock_mgmt_io(phba);
- if (ret == FAILED) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "3323 Failed host reset, bring it offline\n");
- lpfc_sli4_offline_eratt(phba);
- }
return ret;
+error:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "3323 Failed host reset\n");
+ lpfc_unblock_mgmt_io(phba);
+ return FAILED;
}
/**
@@ -5870,7 +6037,7 @@ struct scsi_host_template lpfc_template_no_hr = {
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f9e6a135d656..a0c6945b8139 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1391,9 +1391,12 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
- if (!piocb->iocb_cmpl)
- lpfc_sli_release_iocbq(phba, piocb);
- else {
+ if (!piocb->iocb_cmpl) {
+ if (piocb->iocb_flag & LPFC_IO_NVME)
+ lpfc_nvme_cancel_iocb(phba, piocb);
+ else
+ lpfc_sli_release_iocbq(phba, piocb);
+ } else {
piocb->iocb.ulpStatus = ulpstatus;
piocb->iocb.un.ulpWord[4] = ulpWord4;
(piocb->iocb_cmpl) (phba, piocb, piocb);
@@ -2426,6 +2429,20 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
+static void
+__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
+ }
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+}
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
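Note: both UNREG_LOGIN completion paths in the hunks below now funnel through __lpfc_sli_rpi_release(), which frees the RPI and clears NLP_RELEASE_RPI under the ndlp lock only when that flag was set, and always clears NLP_UNREG_INP. A minimal userspace sketch of the same flag-guarded release follows, with a mutex standing in for ndlp_lock and illustrative names throughout.

/* Sketch: release a resource id and clear bookkeeping flags only when the
 * "release" flag was set (illustrative names).
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_RELEASE_ID	0x1		/* stand-in for NLP_RELEASE_RPI */
#define FLAG_UNREG_INP	0x2		/* stand-in for NLP_UNREG_INP */
#define ID_INVALID	(-1)

struct node {
	pthread_mutex_t lock;
	unsigned int flags;
	int id;
};

static void free_id(int id)
{
	printf("freeing id %d\n", id);	/* stand-in for lpfc_sli4_free_rpi() */
}

static void node_release_id(struct node *n)
{
	if (n->flags & FLAG_RELEASE_ID) {
		free_id(n->id);
		pthread_mutex_lock(&n->lock);
		n->flags &= ~FLAG_RELEASE_ID;
		n->id = ID_INVALID;
		pthread_mutex_unlock(&n->lock);
	}
	n->flags &= ~FLAG_UNREG_INP;
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER,
			  FLAG_RELEASE_ID | FLAG_UNREG_INP, 42 };

	node_release_id(&n);
	printf("flags now 0x%x, id %d\n", n.flags, n.id);
	return 0;
}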
@@ -2497,7 +2514,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x %p\n",
+ "on NPort x%x Data: x%x x%x %px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
@@ -2507,7 +2524,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
- ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ __lpfc_sli_rpi_release(vport, ndlp);
}
pmb->ctx_ndlp = NULL;
}
@@ -2555,7 +2572,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport, KERN_INFO, LOG_MBOX | LOG_SLI,
"0010 UNREG_LOGIN vpi:%x "
"rpi:%x DID:%x defer x%x flg x%x "
- "map:%x %p\n",
+ "map:%x %px\n",
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_defer_did,
ndlp->nlp_flag,
@@ -2573,7 +2590,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport, KERN_INFO, LOG_DISCOVERY,
"4111 UNREG cmpl deferred "
"clr x%x on "
- "NPort x%x Data: x%x %p\n",
+ "NPort x%x Data: x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did, ndlp);
ndlp->nlp_flag &= ~NLP_UNREG_INP;
@@ -2582,7 +2599,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_issue_els_plogi(
vport, ndlp->nlp_DID, 0);
} else {
- ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ __lpfc_sli_rpi_release(vport, ndlp);
}
}
}
@@ -2695,7 +2712,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
+ "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
"x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
@@ -3961,7 +3978,7 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
lpfc_sli_abort_iocb_ring(phba, pring);
}
} else {
@@ -3971,17 +3988,17 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
+ * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
* @phba: Pointer to HBA context object.
*
- * This function flushes all iocbs in the fcp ring and frees all the iocb
+ * This function flushes all iocbs in the IO ring and frees all the iocb
* objects in txq and txcmplq. This function will not issue abort iocbs
* for all the iocb commands in txcmplq, they will just be returned with
* IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
* slot has been permanently disabled.
**/
void
-lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
{
LIST_HEAD(txq);
LIST_HEAD(txcmplq);
@@ -3992,13 +4009,13 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
/* Indicate the I/O queues are flushed */
- phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
+ phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
spin_lock_irq(&pring->ring_lock);
/* Retrieve everything on txq */
@@ -4046,56 +4063,6 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
- * @phba: Pointer to HBA context object.
- *
- * This function flushes all wqes in the nvme rings and frees all resources
- * in the txcmplq. This function does not issue abort wqes for the IO
- * commands in txcmplq, they will just be returned with
- * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
- * slot has been permanently disabled.
- **/
-void
-lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
-{
- LIST_HEAD(txcmplq);
- struct lpfc_sli_ring *pring;
- uint32_t i;
- struct lpfc_iocbq *piocb, *next_iocb;
-
- if ((phba->sli_rev < LPFC_SLI_REV4) ||
- !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return;
-
- /* Hint to other driver operations that a flush is in progress. */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
- spin_unlock_irq(&phba->hbalock);
-
- /* Cycle through all NVME rings and complete each IO with
- * a local driver reason code. This is a flush so no
- * abort exchange to FW.
- */
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
-
- spin_lock_irq(&pring->ring_lock);
- list_for_each_entry_safe(piocb, next_iocb,
- &pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
- /* Retrieve everything on the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq);
- pring->txcmplq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
-
- /* Flush the txcmpq &&&PAE */
- lpfc_sli_cancel_iocbs(phba, &txcmplq,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
- }
-}
-
-/**
* lpfc_sli_brdready_s3 - Check for sli3 host ready status
* @phba: Pointer to HBA context object.
* @mask: Bit mask to be checked.
@@ -4495,7 +4462,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
* checking during resets the device. The caller is not required to hold
* any locks.
*
- * This function returns 0 always.
+ * This function returns 0 on success else returns negative error code.
**/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
@@ -4652,8 +4619,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
rc = lpfc_sli4_brdreset(phba);
- if (rc)
- return rc;
+ if (rc) {
+ phba->link_state = LPFC_HBA_ERROR;
+ goto hba_down_queue;
+ }
spin_lock_irq(&phba->hbalock);
phba->pport->stopped = 0;
@@ -4668,6 +4637,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
if (hba_aer_enabled)
pci_disable_pcie_error_reporting(phba->pcidev);
+hba_down_queue:
lpfc_hba_down_post(phba);
lpfc_sli4_queue_destroy(phba);
@@ -5584,10 +5554,8 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = &sli4_hba->hdwq[qidx];
/* ARM the corresponding CQ */
- sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
- LPFC_QUEUE_REARM);
- sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
- LPFC_QUEUE_REARM);
+ sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
+ LPFC_QUEUE_REARM);
}
/* Loop thru all IRQ vectors */
@@ -7243,7 +7211,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
else
phba->hba_flag &= ~HBA_FIP_SUPPORT;
- phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
+ phba->hba_flag &= ~HBA_IOQ_FLUSH;
if (phba->sli_rev != LPFC_SLI_REV4) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7972,7 +7940,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+ "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
mb->mbxCommand,
phba->pport->port_state,
phba->sli.sli_flag,
@@ -9333,11 +9301,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
memset(wqe, 0, sizeof(union lpfc_wqe128));
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
- /* The ct field has moved so reset */
- wqe->generic.wqe_com.word7 = 0;
- wqe->generic.wqe_com.word10 = 0;
- }
+ /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word7 = 0;
+ wqe->generic.wqe_com.word10 = 0;
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
@@ -9796,7 +9762,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* we re-construct this WQE here based on information in
* iocbq from scratch.
*/
- memset(wqe, 0, sizeof(union lpfc_wqe));
+ memset(wqe, 0, sizeof(*wqe));
/* OX_ID is invariable to who sent ABTS to CT exchange */
bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
@@ -9843,6 +9809,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
break;
case CMD_SEND_FRAME:
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
+ bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
+ bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
+ bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
return 0;
@@ -9904,7 +9879,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
/* Get the WQ */
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
+ wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
} else {
wq = phba->sli4_hba.els_wq;
}
@@ -10051,7 +10026,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
- return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
+ return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
return NULL;
@@ -10504,7 +10479,7 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_FCP_RING;
pring->txcmplq_cnt = 0;
@@ -10523,16 +10498,6 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
spin_lock_init(&pring->ring_lock);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
- pring->flag = 0;
- pring->ringno = LPFC_FCP_RING;
- pring->txcmplq_cnt = 0;
- INIT_LIST_HEAD(&pring->txq);
- INIT_LIST_HEAD(&pring->txcmplq);
- INIT_LIST_HEAD(&pring->iocb_continueq);
- spin_lock_init(&pring->ring_lock);
- }
pring = phba->sli4_hba.nvmels_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_ELS_RING;
@@ -10796,9 +10761,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
pring = qp->pring;
if (!pring)
continue;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&pring->txq, &completions);
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
if (pring == phba->sli4_hba.els_wq->pring) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
@@ -10979,7 +10944,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0402 Cannot find virtual addr for buffer tag on "
- "ring %d Data x%lx x%p x%p x%x\n",
+ "ring %d Data x%lx x%px x%px x%x\n",
pring->ringno, (unsigned long) tag,
slp->next, slp->prev, pring->postbufq_cnt);
@@ -11023,7 +10988,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0410 Cannot find virtual addr for mapped buf on "
- "ring %d Data x%llx x%p x%p x%x\n",
+ "ring %d Data x%llx x%px x%px x%x\n",
pring->ringno, (unsigned long long)phys,
slp->next, slp->prev, pring->postbufq_cnt);
return NULL;
@@ -11078,13 +11043,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb = phba->sli.iocbq_lookup[abort_context];
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
- "0327 Cannot abort els iocb %p "
+ "0327 Cannot abort els iocb x%px "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
abort_iocb, abort_iotag, abort_context,
irsp->ulpStatus, irsp->un.ulpWord[4]);
spin_unlock_irq(&phba->hbalock);
+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
+ lpfc_sli_release_iocbq(phba, abort_iocb);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -11493,7 +11461,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
int i;
/* all I/Os are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
+ if (phba->hba_flag & HBA_IOQ_FLUSH)
return errcnt;
for (i = 1; i <= phba->sli.last_iotag; i++) {
@@ -11603,7 +11571,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
spin_lock_irqsave(&phba->hbalock, iflags);
/* all I/Os are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
}
@@ -11627,7 +11595,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 =
- phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
+ phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
if (!pring_s4) {
spin_unlock(&lpfc_cmd->buf_lock);
continue;
@@ -13336,8 +13304,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
unsigned long iflags;
switch (cq->subtype) {
- case LPFC_FCP:
- lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
+ case LPFC_IO:
+ lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ }
workposted = false;
break;
case LPFC_NVME_LS: /* NVME LS uses ELS resources */
@@ -13355,15 +13328,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
- case LPFC_NVME:
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support)
- lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
- else
- lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
-
- workposted = false;
- break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0603 Invalid CQ subtype %d: "
@@ -13691,7 +13655,7 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
&delay);
break;
case LPFC_WCQ:
- if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
+ if (cq->subtype == LPFC_IO)
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_fp_handle_cqe,
&delay);
@@ -14008,10 +13972,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->CQ_wq++;
/* Process the WQ complete event */
phba->last_completion_time = jiffies;
- if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
- lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
- (struct lpfc_wcqe_complete *)&wcqe);
- if (cq->subtype == LPFC_NVME_LS)
+ if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
(struct lpfc_wcqe_complete *)&wcqe);
break;
@@ -16918,6 +16879,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
struct fc_vft_header *fc_vft_hdr;
uint32_t *header = (uint32_t *) fc_hdr;
+#define FC_RCTL_MDS_DIAGS 0xF4
+
switch (fc_hdr->fh_r_ctl) {
case FC_RCTL_DD_UNCAT: /* uncategorized information */
case FC_RCTL_DD_SOL_DATA: /* solicited data */
@@ -17445,7 +17408,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = lpfc_nlp_get(ndlp);
- ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
@@ -17928,6 +17890,17 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fcfi = bf_get(lpfc_rcqe_fcf_id,
&dmabuf->cq_event.cqe.rcqe_cmpl);
+ if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
+ vport = phba->pport;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2023 MDS Loopback %d bytes\n",
+ bf_get(lpfc_rcqe_length,
+ &dmabuf->cq_event.cqe.rcqe_cmpl));
+ /* Handle MDS Loopback frames */
+ lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+ return;
+ }
+
/* d_id this frame is directed to */
did = sli4_did_from_fc_hdr(fc_hdr);
@@ -18211,6 +18184,10 @@ __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2016 rpi %x not inuse\n",
+ rpi);
}
}
@@ -19461,7 +19438,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
- wq = phba->sli4_hba.hdwq[0].fcp_wq;
+ wq = phba->sli4_hba.hdwq[0].io_wq;
if (unlikely(!wq))
return 0;
pring = wq->pring;
@@ -19712,10 +19689,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
/* NVME_FCREQ and NVME_ABTS requests */
if (pwqe->iocb_flag & LPFC_IO_NVME) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
- wq = qp->nvme_wq;
+ wq = qp->io_wq;
pring = wq->pring;
- bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -19732,7 +19709,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
/* NVMET requests */
if (pwqe->iocb_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
- wq = qp->nvme_wq;
+ wq = qp->io_wq;
pring = wq->pring;
ctxp = pwqe->context2;
@@ -19743,7 +19720,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
pwqe->sli4_xritag);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -19790,9 +19767,7 @@ void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
pvt_pool = &qp->p_multixri_pool->pvt_pool;
pbl_pool = &qp->p_multixri_pool->pbl_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
- if (qp->nvme_wq)
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
multixri_pool->stat_pbl_count = pbl_pool->count;
multixri_pool->stat_pvt_count = pvt_pool->count;
@@ -19862,12 +19837,9 @@ void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
watermark_max = xri_limit;
watermark_min = xri_limit / 2;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
abts_io_bufs = qp->abts_scsi_io_bufs;
- if (qp->nvme_wq) {
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
- abts_io_bufs += qp->abts_nvme_io_bufs;
- }
+ abts_io_bufs += qp->abts_nvme_io_bufs;
new_watermark = txcmplq_cnt + abts_io_bufs;
new_watermark = min(watermark_max, new_watermark);
@@ -20142,12 +20114,9 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
pbl_pool = &qp->p_multixri_pool->pbl_pool;
pvt_pool = &qp->p_multixri_pool->pvt_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
abts_io_bufs = qp->abts_scsi_io_bufs;
- if (qp->nvme_wq) {
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
- abts_io_bufs += qp->abts_nvme_io_bufs;
- }
+ abts_io_bufs += qp->abts_nvme_io_bufs;
xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
xri_limit = qp->p_multixri_pool->xri_limit;
@@ -20188,6 +20157,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
iflag);
}
+
+ if (phba->cfg_xpsgl && !phba->nvmet_support &&
+ !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+ if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
}
/**
@@ -20402,3 +20378,288 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
return lpfc_cmd;
}
+
+/**
+ * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to append the SGL chunk
+ *
+ * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
+ * and will allocate an SGL chunk if the pool is empty.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to sli4_hybrid_sgl - Success
+ **/
+struct sli4_hybrid_sgl *
+lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+ struct sli4_hybrid_sgl *allocated_sgl = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->sgl_list;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ if (likely(!list_empty(buf_list))) {
+ /* break off 1 chunk from the sgl_list */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list, list_node) {
+ list_move_tail(&list_entry->list_node,
+ &lpfc_buf->dma_sgl_xtra_list);
+ break;
+ }
+ } else {
+ /* allocate more */
+ spin_unlock_irq(&hdwq->hdwq_lock);
+ tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+ cpu_to_node(smp_processor_id()));
+ if (!tmp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8353 error kmalloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ return NULL;
+ }
+
+ tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_ATOMIC, &tmp->dma_phys_sgl);
+ if (!tmp->dma_sgl) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8354 error pool_alloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ kfree(tmp);
+ return NULL;
+ }
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+ list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
+ }
+
+ allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
+ struct sli4_hybrid_sgl,
+ list_node);
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+
+ return allocated_sgl;
+}
+
+/**
+ * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the SGL chunk
+ *
+ * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
+ *
+ * Return codes:
+ * 0 - Success
+ * -EINVAL - Error
+ **/
+int
+lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+ int rc = 0;
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->sgl_list;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
+ list_for_each_entry_safe(list_entry, tmp,
+ &lpfc_buf->dma_sgl_xtra_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ buf_list);
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+ return rc;
+}
+
+/**
+ * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup sgl buff resources on
+ *
+ * This routine frees all SGL chunks of hdwq SGL chunk pool.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq)
+{
+ struct list_head *buf_list = &hdwq->sgl_list;
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ /* Free sgl pool */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list, list_node) {
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ list_entry->dma_sgl,
+ list_entry->dma_phys_sgl);
+ list_del(&list_entry->list_node);
+ kfree(list_entry);
+ }
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+}
+
+/**
+ * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
+ *
+ * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
+ * and will allocate a CMD/RSP buffer if the pool is empty.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to fcp_cmd_rsp_buf - Success
+ **/
+struct fcp_cmd_rsp_buf *
+lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_buf)
+{
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+ struct fcp_cmd_rsp_buf *allocated_buf = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ if (likely(!list_empty(buf_list))) {
+ /* break off 1 chunk from the list */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ &lpfc_buf->dma_cmd_rsp_list);
+ break;
+ }
+ } else {
+ /* allocate more */
+ spin_unlock_irq(&hdwq->hdwq_lock);
+ tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+ cpu_to_node(smp_processor_id()));
+ if (!tmp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8355 error kmalloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ return NULL;
+ }
+
+ tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+ GFP_ATOMIC,
+ &tmp->fcp_cmd_rsp_dma_handle);
+
+ if (!tmp->fcp_cmnd) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8356 error pool_alloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ kfree(tmp);
+ return NULL;
+ }
+
+ tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
+ sizeof(struct fcp_cmnd));
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+ list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
+ }
+
+ allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
+ struct fcp_cmd_rsp_buf,
+ list_node);
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+
+ return allocated_buf;
+}
+
+/**
+ * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the CMD/RSP buf
+ *
+ * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
+ *
+ * Return codes:
+ * 0 - Success
+ * -EINVAL - Error
+ **/
+int
+lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_buf)
+{
+ int rc = 0;
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
+ list_for_each_entry_safe(list_entry, tmp,
+ &lpfc_buf->dma_cmd_rsp_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ buf_list);
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+ return rc;
+}
+
+/**
+ * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup cmd rsp buff resources on
+ *
+ * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq)
+{
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ /* Free cmd_rsp buf pool */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list,
+ list_node) {
+ dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
+ list_entry->fcp_cmnd,
+ list_entry->fcp_cmd_rsp_dma_handle);
+ list_del(&list_entry->list_node);
+ kfree(list_entry);
+ }
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+}
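Note: taken together, the new *_per_hdwq helpers added above form a small per-hardware-queue free list: the get routines pop a chunk from hdwq->sgl_list or hdwq->cmd_rsp_buf_list (allocating a fresh one outside hdwq_lock when the list is empty) and park it on the IO buffer's private list, the put routines move the buffer's chunks back to the pool, and the free routines drain everything at teardown. A self-contained userspace sketch of that get/put/free pattern follows, with a mutex in place of hdwq_lock and all names illustrative.

/* Sketch: a per-queue chunk pool with get (pop or allocate), put (return)
 * and free-all (teardown), modelled on the *_per_hdwq helpers
 * (illustrative names; not driver API).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk {
	struct chunk *next;
	void *mem;			/* stand-in for the DMA-pool buffer */
};

struct hdwq_pool {
	pthread_mutex_t lock;
	struct chunk *free_list;
};

/* Pop a chunk from the pool, or allocate a fresh one if it is empty. */
static struct chunk *pool_get(struct hdwq_pool *p)
{
	struct chunk *c;

	pthread_mutex_lock(&p->lock);
	c = p->free_list;
	if (c)
		p->free_list = c->next;
	pthread_mutex_unlock(&p->lock);
	if (c)
		return c;

	/* Allocate outside the lock, as the driver does. */
	c = calloc(1, sizeof(*c));
	if (c)
		c->mem = malloc(64);
	return c;
}

/* Return a chunk to the pool so later IOs can reuse it. */
static void pool_put(struct hdwq_pool *p, struct chunk *c)
{
	pthread_mutex_lock(&p->lock);
	c->next = p->free_list;
	p->free_list = c;
	pthread_mutex_unlock(&p->lock);
}

/* Drain and free every pooled chunk at teardown time. */
static void pool_free_all(struct hdwq_pool *p)
{
	struct chunk *c;

	pthread_mutex_lock(&p->lock);
	while ((c = p->free_list)) {
		p->free_list = c->next;
		free(c->mem);
		free(c);
	}
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct hdwq_pool pool = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct chunk *c = pool_get(&pool);

	if (c) {
		printf("got chunk %p\n", (void *)c);
		pool_put(&pool, c);
	}
	pool_free_all(&pool);
	return 0;
}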
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 467b8270f7fd..37fbcb46387e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -365,9 +365,18 @@ struct lpfc_io_buf {
/* Common fields */
struct list_head list;
void *data;
+
dma_addr_t dma_handle;
dma_addr_t dma_phys_sgl;
- struct sli4_sge *dma_sgl;
+
+ struct sli4_sge *dma_sgl; /* initial segment chunk */
+
+ /* linked list of extra sli4_hybrid_sge */
+ struct list_head dma_sgl_xtra_list;
+
+ /* list head for fcp_cmd_rsp buf */
+ struct list_head dma_cmd_rsp_list;
+
struct lpfc_iocbq cur_iocbq;
struct lpfc_sli4_hdw_queue *hdwq;
uint16_t hdwq_no;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a81ef0293696..0d4882a9e634 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -49,9 +49,6 @@
#define LPFC_FCP_MQ_THRESHOLD_MAX 256
#define LPFC_FCP_MQ_THRESHOLD_DEF 8
-/* Common buffer size to accomidate SCSI and NVME IO buffers */
-#define LPFC_COMMON_IO_BUF_SZ 768
-
/*
* Provide the default FCF Record attributes used by the driver
* when nonFIP mode is configured and there is no other default
@@ -114,9 +111,8 @@ enum lpfc_sli4_queue_type {
enum lpfc_sli4_queue_subtype {
LPFC_NONE,
LPFC_MBOX,
- LPFC_FCP,
+ LPFC_IO,
LPFC_ELS,
- LPFC_NVME,
LPFC_NVMET,
LPFC_NVME_LS,
LPFC_USOL
@@ -646,22 +642,17 @@ struct lpfc_eq_intr_info {
struct lpfc_sli4_hdw_queue {
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue *hba_eq; /* Event queues for HBA */
- struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */
- struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
- struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */
- struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
- uint16_t fcp_cq_map;
- uint16_t nvme_cq_map;
+ struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */
+ struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */
+ uint16_t io_cq_map;
/* Keep track of IO buffers for this hardware queue */
spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
struct list_head lpfc_io_buf_list_get;
spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
struct list_head lpfc_io_buf_list_put;
- spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
- struct list_head lpfc_abts_scsi_buf_list;
- spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
- struct list_head lpfc_abts_nvme_buf_list;
+ spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
+ struct list_head lpfc_abts_io_buf_list;
uint32_t total_io_bufs;
uint32_t get_io_bufs;
uint32_t put_io_bufs;
@@ -685,6 +676,13 @@ struct lpfc_sli4_hdw_queue {
uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
#endif
+
+ /* Per HDWQ pool resources */
+ struct list_head sgl_list;
+ struct list_head cmd_rsp_buf_list;
+
+ /* Lock for syncing Per HDWQ pool resources */
+ spinlock_t hdwq_lock;
};
#ifdef LPFC_HDWQ_LOCK_STAT
@@ -850,8 +848,8 @@ struct lpfc_sli4_hba {
struct lpfc_queue **cq_lookup;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
- spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
- struct list_head lpfc_abts_scsi_buf_list;
+ spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */
+ struct list_head lpfc_abts_io_buf_list;
struct list_head lpfc_nvmet_sgl_list;
spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
struct list_head lpfc_abts_nvmet_ctx_list;
@@ -1056,10 +1054,11 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
- struct sli4_wcqe_xri_aborted *, int);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx);
+ struct sli4_wcqe_xri_aborted *axri,
+ struct lpfc_io_buf *lpfc_ncmd);
+void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri, int idx);
void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
@@ -1094,6 +1093,17 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
+struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
+int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq);
+void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq);
static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
{
return q->q_pgs[idx / q->entry_cnt_per_pg] +
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f7e93aaf1e00..b8aae31ffda3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.2.0.3"
+#define LPFC_DRIVER_VERSION "12.4.0.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 343bc71d4615..b76646357980 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -527,9 +527,11 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,