-rw-r--r--  Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-db845c.dts | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/sdm845-mtp.dts | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 9
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 16
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 7
-rw-r--r--  drivers/scsi/csiostor/csio_wr.c | 8
-rw-r--r--  drivers/scsi/cxlflash/main.c | 12
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_init.c | 13
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 14
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 17
-rw-r--r--  drivers/scsi/fdomain.c | 6
-rw-r--r--  drivers/scsi/fdomain_isa.c | 5
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 4
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 14
-rw-r--r--  drivers/scsi/fnic/fnic_isr.c | 4
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 54
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 982
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 48
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 92
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 307
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 3
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 76
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 29
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 68
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 228
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 61
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 116
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 181
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 34
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 926
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 65
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 43
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 389
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 28
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 591
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 533
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 50
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 112
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 29
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 5
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 10
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_image.h | 39
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_pci.h | 13
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 13
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 175
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 30
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 178
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 196
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 13
-rw-r--r--  drivers/scsi/pmcraid.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf.h | 1
-rw-r--r--  drivers/scsi/qedf/qedf_debugfs.c | 16
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 38
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 33
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 67
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 178
-rw-r--r--  drivers/scsi/qedf/qedf_version.h | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 132
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_dsd.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 33
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 254
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 550
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 226
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 26
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 67
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 32
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 237
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 212
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 35
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 27
-rw-r--r--  drivers/scsi/qlogicpti.c | 10
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 5
-rw-r--r--  drivers/scsi/scsi_lib.c | 52
-rw-r--r--  drivers/scsi/scsi_logging.c | 48
-rw-r--r--  drivers/scsi/sd.c | 1
-rw-r--r--  drivers/scsi/smartpqi/Kconfig | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 20
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 236
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 102
-rw-r--r--  drivers/scsi/sun3_scsi.c | 2
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_nvram.c | 2
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c | 40
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c | 4
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 41
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 4
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 18
-rw-r--r--  drivers/scsi/ufs/ufs.h | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 4
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 281
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 57
-rw-r--r--  drivers/scsi/virtio_scsi.c | 88
-rw-r--r--  drivers/scsi/wd33c93.c | 1
-rw-r--r--  drivers/target/target_core_user.c | 20
-rw-r--r--  include/linux/nvme-fc-driver.h | 2
-rw-r--r--  include/scsi/scsi_cmnd.h | 1
-rw-r--r--  include/scsi/scsi_dbg.h | 2
-rw-r--r--  include/scsi/scsi_host.h | 16
-rw-r--r--  include/uapi/scsi/scsi_bsg_fc.h | 54
-rw-r--r--  include/uapi/scsi/scsi_netlink.h | 20
-rw-r--r--  include/uapi/scsi/scsi_netlink_fc.h | 17
130 files changed, 5755 insertions, 3679 deletions
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index a74720486ee2..d78ef63935f9 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -54,6 +54,8 @@ Optional properties:
PHY reset from the UFS controller.
- resets : reset node register
- reset-names : describe reset node register, the "rst" corresponds to reset the whole UFS IP.
+- reset-gpios : A phandle and gpio specifier denoting the GPIO connected
+ to the RESET pin of the UFS memory device.
Note: If above properties are not defined it can be assumed that the supply
regulators or clocks are always on.
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 71bd717a4251..f5a85caff1a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -497,6 +497,8 @@
&ufs_mem_hc {
status = "okay";
+ reset-gpios = <&tlmm 150 GPIO_ACTIVE_LOW>;
+
vcc-supply = <&vreg_l20a_2p95>;
vcc-max-microamp = <800000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index 2e78638eb73b..c57548b7b250 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -388,6 +388,8 @@
&ufs_mem_hc {
status = "okay";
+ reset-gpios = <&tlmm 150 GPIO_ACTIVE_LOW>;
+
vcc-supply = <&vreg_l20a_2p95>;
vcc-max-microamp = <600000>;
};
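The two board files above wire the new binding to TLMM GPIO 150, active low. As a minimal sketch (not taken from this series) of how a host driver might consume such a "reset-gpios" property via the gpiod API; the function name and timing values are illustrative:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Illustrative consumer of a "reset-gpios" DT property. */
static int example_ufs_device_reset(struct device *dev)
{
	struct gpio_desc *reset_gpio;

	/* "reset" matches the reset-gpios property; lookup is optional */
	reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset_gpio))
		return PTR_ERR(reset_gpio);

	if (reset_gpio) {
		/* logical 1 asserts reset; GPIO_ACTIVE_LOW is honoured */
		gpiod_set_value_cansleep(reset_gpio, 1);
		usleep_range(10, 15);
		gpiod_set_value_cansleep(reset_gpio, 0);
	}
	return 0;
}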
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 261d8e495fed..f5781e31f57c 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -565,8 +565,7 @@ static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
if (asd_ha->hw_prof.scb_ext)
asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
- if (asd_ha->hw_prof.ddb_bitmap)
- kfree(asd_ha->hw_prof.ddb_bitmap);
+ kfree(asd_ha->hw_prof.ddb_bitmap);
asd_ha->hw_prof.ddb_bitmap = NULL;
for (i = 0; i < ASD_MAX_PHYS; i++) {
@@ -641,12 +640,10 @@ Err:
static void asd_destroy_global_caches(void)
{
- if (asd_dma_token_cache)
- kmem_cache_destroy(asd_dma_token_cache);
+ kmem_cache_destroy(asd_dma_token_cache);
asd_dma_token_cache = NULL;
- if (asd_ascb_cache)
- kmem_cache_destroy(asd_ascb_cache);
+ kmem_cache_destroy(asd_ascb_cache);
asd_ascb_cache = NULL;
}
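Both hunks rely on kfree() and kmem_cache_destroy() being NULL-safe no-ops, which makes the guarding checks dead weight. A minimal illustration of the idiom, with hypothetical names:

#include <linux/slab.h>

struct example_ctx {
	void *buf;
	struct kmem_cache *cache;
};

static void example_teardown(struct example_ctx *ctx)
{
	kfree(ctx->buf);		/* no-op when ctx->buf is NULL */
	ctx->buf = NULL;

	kmem_cache_destroy(ctx->cache);	/* likewise NULL-safe */
	ctx->cache = NULL;
}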
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index b2014cb96f58..22f06be2606f 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -536,7 +536,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
struct device *dev)
{
struct bfad_im_port_pointer *im_portp;
- int error = 1;
+ int error;
mutex_lock(&bfad_mutex);
error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9ff9429395eb..b4bfab5edf8f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -428,7 +428,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fc_lport *lport;
struct bnx2fc_interface *interface;
struct fcoe_ctlr *ctlr;
- struct fc_frame_header *fh;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
struct sk_buff *tmp_skb;
@@ -463,7 +462,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
goto err;
skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
- fh = (struct fc_frame_header *) skb_transport_header(skb);
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 747f019fb393..f069e09beb10 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -633,7 +633,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
u16 xid;
u32 frame_len, len;
struct bnx2fc_cmd *io_req = NULL;
- struct fcoe_task_ctx_entry *task, *task_page;
struct bnx2fc_interface *interface = tgt->port->priv;
struct bnx2fc_hba *hba = interface->hba;
int task_idx, index;
@@ -711,9 +710,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
index = xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)
- hba->task_ctx[task_idx];
- task = &(task_page[index]);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (!io_req)
@@ -839,9 +835,6 @@ ret_err_rqe:
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
index = xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)
- interface->hba->task_ctx[task_idx];
- task = &(task_page[index]);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (!io_req)
goto ret_warn_rqe;
@@ -1122,7 +1115,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
struct fcoe_kcqe *ofld_kcqe)
{
struct bnx2fc_rport *tgt;
- struct fcoe_port *port;
struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
@@ -1136,7 +1128,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
}
BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
ofld_kcqe->fcoe_conn_context_id);
- port = tgt->port;
interface = tgt->port->priv;
if (hba != interface->hba) {
printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
@@ -1463,10 +1454,7 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
{
struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
- struct bnx2fc_interface *interface = tgt->port->priv;
struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
- struct fcoe_task_ctx_entry *orig_task;
- struct fcoe_task_ctx_entry *task_page;
struct fcoe_ext_mul_sges_ctx *sgl;
u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
u8 orig_task_type;
@@ -1528,10 +1516,6 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
index = orig_xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)
- interface->hba->task_ctx[orig_task_idx];
- orig_task = &(task_page[index]);
-
/* Multiple SGEs were used for this IO */
sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 9e50e5b53763..da00ca5fa5dc 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -930,7 +930,6 @@ abts_err:
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
enum fc_rctl r_ctl)
{
- struct fc_lport *lport;
struct bnx2fc_rport *tgt = orig_io_req->tgt;
struct bnx2fc_interface *interface;
struct fcoe_port *port;
@@ -948,7 +947,6 @@ int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
port = orig_io_req->port;
interface = port->priv;
- lport = port->lport;
cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
if (!cb_arg) {
@@ -999,7 +997,6 @@ cleanup_err:
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
- struct fc_lport *lport;
struct bnx2fc_rport *tgt = io_req->tgt;
struct bnx2fc_interface *interface;
struct fcoe_port *port;
@@ -1015,7 +1012,6 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
port = io_req->port;
interface = port->priv;
- lport = port->lport;
cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
if (!cleanup_io_req) {
@@ -1927,8 +1923,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp;
struct bnx2fc_rport *tgt = io_req->tgt;
struct scsi_cmnd *sc_cmd;
- struct Scsi_Host *host;
-
/* scsi_cmd_cmpl is called with tgt lock held */
@@ -1957,7 +1951,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* parse fcp_rsp and obtain sense data from RQ if available */
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
- host = sc_cmd->device->host;
if (!sc_cmd->SCp.ptr) {
printk(KERN_ERR PFX "SCp.ptr is NULL\n");
return;
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 03bd896cdbb9..0ca695110f54 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -1316,7 +1316,6 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
u32 fl_align = clsz < 32 ? 32 : clsz;
u32 pack_align;
u32 ingpad, ingpack;
- int pcie_cap;
csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
@@ -1347,8 +1346,7 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
* multiple of the Maximum Payload Size.
*/
pack_align = fl_align;
- pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap) {
+ if (pci_is_pcie(hw->pdev)) {
u32 mps, mps_log;
u16 devctl;
@@ -1356,9 +1354,7 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
* [bits 7:5] encodes sizes as powers of 2 starting at
* 128 bytes.
*/
- pci_read_config_word(hw->pdev,
- pcie_cap + PCI_EXP_DEVCTL,
- &devctl);
+ pcie_capability_read_word(hw->pdev, PCI_EXP_DEVCTL, &devctl);
mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
mps = 1 << mps_log;
if (mps > pack_align)
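The conversion above replaces a manual pci_find_capability() plus offset-based config read with the pcie_capability_read_word() helper, which locates the PCIe capability internally. A sketch of the same pattern, with an illustrative function name:

#include <linux/pci.h>

static u32 example_max_payload_bytes(struct pci_dev *pdev)
{
	u16 devctl;

	if (!pci_is_pcie(pdev))
		return 0;

	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);

	/* DEVCTL bits 7:5 encode Max_Payload_Size as 128 << value */
	return 128U << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}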
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b1f4724efde2..93ef97af22df 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -753,10 +753,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
+ /* fall through */
case UNMAP_TWO:
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
+ /* fall through */
case UNMAP_ONE:
cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
+ /* fall through */
case FREE_IRQ:
cfg->ops->free_afu_irqs(hwq->ctx_cookie);
/* fall through */
@@ -973,14 +976,18 @@ static void cxlflash_remove(struct pci_dev *pdev)
switch (cfg->init_state) {
case INIT_STATE_CDEV:
cxlflash_release_chrdev(cfg);
+ /* fall through */
case INIT_STATE_SCSI:
cxlflash_term_local_luns(cfg);
scsi_remove_host(cfg->host);
+ /* fall through */
case INIT_STATE_AFU:
term_afu(cfg);
+ /* fall through */
case INIT_STATE_PCI:
cfg->ops->destroy_afu(cfg->afu_cookie);
pci_disable_device(pdev);
+ /* fall through */
case INIT_STATE_NONE:
free_mem(cfg);
scsi_host_put(cfg->host);
@@ -2353,11 +2360,11 @@ retry:
cxlflash_schedule_async_reset(cfg);
break;
}
- /* fall through to retry */
+ /* fall through - to retry */
case -EAGAIN:
if (++nretry < 2)
goto retry;
- /* fall through to exit */
+ /* fall through - to exit */
default:
break;
}
@@ -3017,6 +3024,7 @@ retry:
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
goto retry;
+ /* else, fall through */
default:
/* Ideally should not happen */
dev_err(dev, "%s: Device is not ready, state=%d\n",
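These hunks annotate deliberate case cascades so GCC's -Wimplicit-fallthrough does not warn; each init or teardown state falls into the next lower one, and the comment must directly precede the next label for the parser to accept it (hence "fall through - to retry"). The shape of the pattern, with hypothetical names:

enum example_state { EX_STATE_A, EX_STATE_B, EX_STATE_C };

static void undo_a(void) { }
static void undo_b(void) { }
static void undo_c(void) { }

static void example_unwind(enum example_state state)
{
	switch (state) {
	case EX_STATE_C:
		undo_c();
		/* fall through */
	case EX_STATE_B:
		undo_b();
		/* fall through */
	case EX_STATE_A:
		undo_a();
		break;
	}
}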
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 65f1fe343c64..5efc959493ec 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work)
spin_unlock(&ctlr->ms_lock);
retry:
+ memset(cdb, 0, sizeof(cdb));
+
data_size = rdac_failover_get(ctlr, &list, cdb);
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
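The added memset() matters because the retry label re-enters rdac_failover_get(), which may build a shorter CDB than the previous attempt; zeroing first keeps stale bytes from leaking into the new command. A compressed sketch of the hazard, with hypothetical helper names:

#include <linux/string.h>

/* Stand-in for a submit that fails once, forcing one retry. */
static int ex_fails_left = 1;
static int example_submit(const unsigned char *cdb, int len)
{
	(void)cdb; (void)len;
	return ex_fails_left-- > 0 ? -1 : 0;
}

static void example_send(unsigned char param_len)
{
	unsigned char cdb[16];
	int retries = 2;

retry:
	memset(cdb, 0, sizeof(cdb));	/* wipe bytes a prior attempt wrote */
	cdb[0] = 0x15;			/* MODE SELECT(6) */
	cdb[4] = param_len;		/* may differ between attempts */
	if (example_submit(cdb, 6) && retries-- > 0)
		goto retry;
}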
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 950cd92df2ff..eb7d139ffc00 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -762,14 +762,10 @@ u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
{
- int pcie_cap_reg;
-
- pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
- if (pcie_cap_reg) {
+ if (pci_is_pcie(a->pcid)) {
u16 devcontrol;
- pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
- &devcontrol);
+ pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol);
if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
PCI_EXP_DEVCTL_READRQ_512B) {
@@ -778,9 +774,8 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
- pci_write_config_word(a->pcid,
- pcie_cap_reg + PCI_EXP_DEVCTL,
- devcontrol);
+ pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL,
+ devcontrol);
}
}
}
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 3d130523c288..442c5e70a7b4 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -757,7 +757,6 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
struct atto_hba_get_adapter_info *gai =
&hi->data.get_adap_info;
- int pcie_cap_reg;
if (hi->flags & HBAF_TUNNEL) {
hi->status = ATTO_STS_UNSUPPORTED;
@@ -784,17 +783,14 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
- pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
- if (pcie_cap_reg) {
+ if (pci_is_pcie(a->pcid)) {
u16 stat;
u32 caps;
- pci_read_config_word(a->pcid,
- pcie_cap_reg + PCI_EXP_LNKSTA,
- &stat);
- pci_read_config_dword(a->pcid,
- pcie_cap_reg + PCI_EXP_LNKCAP,
- &caps);
+ pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA,
+ &stat);
+ pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
+ &caps);
gai->pci.link_speed_curr =
(u8)(stat & PCI_EXP_LNKSTA_CLS);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 587d4bbb7d22..25dae9f0b205 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1250,15 +1250,21 @@ static int __init fcoe_if_init(void)
/* attach to scsi transport */
fcoe_nport_scsi_transport =
fc_attach_transport(&fcoe_nport_fc_functions);
+ if (!fcoe_nport_scsi_transport)
+ goto err;
+
fcoe_vport_scsi_transport =
fc_attach_transport(&fcoe_vport_fc_functions);
-
- if (!fcoe_nport_scsi_transport) {
- printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
- return -ENODEV;
- }
+ if (!fcoe_vport_scsi_transport)
+ goto err_vport;
return 0;
+
+err_vport:
+ fc_release_transport(fcoe_nport_scsi_transport);
+err:
+ printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
+ return -ENODEV;
}
/**
@@ -1617,7 +1623,6 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
else
fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
- fh = (struct fc_frame_header *) skb_transport_header(skb);
fh = fc_frame_header_get(fp);
if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
return 0;
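Previously only the first fc_attach_transport() result was checked, and only after both calls, so a failure of the second attach leaked the first. The rewrite checks each attach and unwinds in reverse order. The general goto-unwind shape, with stand-ins for fc_attach_transport()/fc_release_transport():

#include <linux/errno.h>
#include <linux/printk.h>

static int ex_token;
static void *ex_attach_nport(void) { return &ex_token; }
static void *ex_attach_vport(void) { return NULL; }	/* force the unwind */
static void ex_release(void *t) { (void)t; }

static int example_if_init(void)
{
	void *nport, *vport;

	nport = ex_attach_nport();
	if (!nport)
		goto err;

	vport = ex_attach_vport();
	if (!vport)
		goto err_vport;

	return 0;

err_vport:
	ex_release(nport);	/* undo the first attach */
err:
	pr_err("example: failed to attach transport\n");
	return -ENODEV;
}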
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index b5e66971b6d9..772bdc93930a 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -166,7 +166,7 @@ static int fdomain_test_loopback(int base)
static void fdomain_reset(int base)
{
- outb(1, base + REG_BCTL);
+ outb(BCTL_RST, base + REG_BCTL);
mdelay(20);
outb(0, base + REG_BCTL);
mdelay(1150);
@@ -306,7 +306,7 @@ static void fdomain_work(struct work_struct *work)
status = inb(fd->base + REG_BSTAT);
if (status & BSTAT_REQ) {
- switch (status & 0x0e) {
+ switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) {
case BSTAT_CMD: /* COMMAND OUT */
outb(cmd->cmnd[cmd->SCp.sent_command++],
fd->base + REG_SCSI_DATA);
@@ -331,7 +331,7 @@ static void fdomain_work(struct work_struct *work)
case BSTAT_MSG | BSTAT_CMD: /* MESSAGE OUT */
outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA);
break;
- case BSTAT_MSG | BSTAT_IO | BSTAT_CMD: /* MESSAGE IN */
+ case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */
cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
if (!cmd->SCp.Message)
++done;
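The fdomain hunks replace the magic mask 0x0e with the named BSTAT_* phase bits and reorder the MESSAGE IN case label to match the mask order. Hypothetical bit values consistent with that mask (the real definitions live in the driver's header):

#include <linux/bits.h>

/* Illustrative values only: MSG|CMD|IO must equal the old 0x0e mask. */
#define EX_BSTAT_IO	BIT(1)	/* data direction */
#define EX_BSTAT_CMD	BIT(2)	/* command/data */
#define EX_BSTAT_MSG	BIT(3)	/* message phase */

#define EX_PHASE_MASK	(EX_BSTAT_MSG | EX_BSTAT_CMD | EX_BSTAT_IO)	/* 0x0e */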
diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c
index 28639adf8219..f2da4fa382e8 100644
--- a/drivers/scsi/fdomain_isa.c
+++ b/drivers/scsi/fdomain_isa.c
@@ -131,8 +131,7 @@ static int fdomain_isa_match(struct device *dev, unsigned int ndev)
if (!request_region(base, FDOMAIN_REGION_SIZE, "fdomain_isa"))
return 0;
- irq = irqs[(inb(base + REG_CFG1) & 0x0e) >> 1];
-
+ irq = irqs[(inb(base + REG_CFG1) & CFG1_IRQ_MASK) >> 1];
if (sig)
this_id = sig->this_id;
@@ -164,7 +163,7 @@ static int fdomain_isa_param_match(struct device *dev, unsigned int ndev)
}
if (irq_ <= 0)
- irq_ = irqs[(inb(io[ndev] + REG_CFG1) & 0x0e) >> 1];
+ irq_ = irqs[(inb(io[ndev] + REG_CFG1) & CFG1_IRQ_MASK) >> 1];
sh = fdomain_create(io[ndev], irq_, scsi_id[ndev], dev);
if (!sh) {
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 21991c99db7c..13f7d88d6e57 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -52,7 +52,6 @@ static struct fc_trace_flag_type *fc_trc_flag;
*/
int fnic_debugfs_init(void)
{
- int rc = -1;
fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
fnic_stats_debugfs_root = debugfs_create_dir("statistics",
@@ -70,8 +69,7 @@ int fnic_debugfs_init(void)
fc_trc_flag->fc_clear = 4;
}
- rc = 0;
- return rc;
+ return 0;
}
/*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 911a5adc289c..673887e383cc 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -52,6 +52,7 @@ void fnic_handle_link(struct work_struct *work)
unsigned long flags;
int old_link_status;
u32 old_link_down_cnt;
+ u64 old_port_speed, new_port_speed;
spin_lock_irqsave(&fnic->fnic_lock, flags);
@@ -62,14 +63,19 @@ void fnic_handle_link(struct work_struct *work)
old_link_down_cnt = fnic->link_down_cnt;
old_link_status = fnic->link_status;
+ old_port_speed = atomic64_read(
+ &fnic->fnic_stats.misc_stats.current_port_speed);
+
fnic->link_status = vnic_dev_link_status(fnic->vdev);
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
+ new_port_speed = vnic_dev_port_speed(fnic->vdev);
atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
- vnic_dev_port_speed(fnic->vdev));
- shost_printk(KERN_INFO, fnic->lport->host, "Current vnic speed set to : %llu\n",
- (u64)atomic64_read(
- &fnic->fnic_stats.misc_stats.current_port_speed));
+ new_port_speed);
+ if (old_port_speed != new_port_speed)
+ shost_printk(KERN_INFO, fnic->lport->host,
+ "Current vnic speed set to : %llu\n",
+ new_port_speed);
switch (vnic_dev_port_speed(fnic->vdev)) {
case DCEM_PORTSPEED_10G:
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index da4602b63495..2fb2731f50fb 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -254,7 +254,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
int vecs = n + m + o + 1;
if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
- PCI_IRQ_MSIX) < 0) {
+ PCI_IRQ_MSIX) == vecs) {
fnic->rq_count = n;
fnic->raw_wq_count = m;
fnic->wq_copy_count = o;
@@ -280,7 +280,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->wq_copy_count >= 1 &&
fnic->cq_count >= 3 &&
fnic->intr_count >= 1 &&
- pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) < 0) {
+ pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
fnic->rq_count = 1;
fnic->raw_wq_count = 1;
fnic->wq_copy_count = 1;
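The fix above hinges on pci_alloc_irq_vectors() returning the number of vectors actually allocated on success (here necessarily vecs, since min == max) or a negative errno; the old "< 0" test ran the success-path assignments on failure. A sketch of the intended usage, function name illustrative:

#include <linux/pci.h>

static int example_setup_irqs(struct pci_dev *pdev, int vecs)
{
	/* min == max, so success returns exactly vecs */
	if (pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSIX) == vecs)
		return 0;

	/* fall back to a single MSI vector */
	if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI) == 1)
		return 0;

	return -ENOSPC;
}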
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 9621831e17ba..a0d01aea28f7 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -453,7 +453,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
(u64)atomic64_read(&stats->misc_stats.frame_errors));
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Firmware reported port seed: %llu\n",
+ "Firmware reported port speed: %llu\n",
(u64)atomic64_read(
&stats->misc_stats.current_port_speed));
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 42a02cc47a60..720c4d6be939 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -31,7 +31,13 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
-#define HISI_SAS_RESERVED_IPTT_CNT 96
+#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
+#define HISI_SAS_RESERVED_IPTT 96
+#define HISI_SAS_UNRESERVED_IPTT \
+ (HISI_SAS_MAX_COMMANDS - HISI_SAS_RESERVED_IPTT)
+
+#define HISI_SAS_IOST_ITCT_CACHE_NUM 64
+#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -128,7 +134,6 @@ struct hisi_sas_rst {
#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
DECLARE_COMPLETION_ONSTACK(c); \
- DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
enum hisi_sas_bit_err_type {
@@ -249,6 +254,22 @@ struct hisi_sas_debugfs_reg {
};
};
+struct hisi_sas_iost_itct_cache {
+ u32 data[HISI_SAS_IOST_ITCT_CACHE_DW_SZ];
+};
+
+enum hisi_sas_debugfs_reg_array_member {
+ DEBUGFS_GLOBAL = 0,
+ DEBUGFS_AXI,
+ DEBUGFS_RAS,
+ DEBUGFS_REGS_NUM
+};
+
+enum hisi_sas_debugfs_cache_type {
+ HISI_SAS_ITCT_CACHE,
+ HISI_SAS_IOST_CACHE,
+};
+
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
@@ -257,7 +278,6 @@ struct hisi_sas_hw {
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no);
- int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
void (*start_delivery)(struct hisi_sas_dq *dq);
void (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
@@ -268,8 +288,6 @@ struct hisi_sas_hw {
void (*prep_abort)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
int device_id, int abort_flag, int tag_to_abort);
- int (*slot_complete)(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot);
void (*phys_init)(struct hisi_hba *hisi_hba);
void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -288,15 +306,18 @@ struct hisi_sas_hw {
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data);
- int (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
- int delay_ms, int timeout_ms);
+ void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms);
void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
void (*snapshot_restore)(struct hisi_hba *hisi_hba);
- int max_command_entries;
+ int (*set_bist)(struct hisi_hba *hisi_hba, bool enable);
+ void (*read_iost_itct_cache)(struct hisi_hba *hisi_hba,
+ enum hisi_sas_debugfs_cache_type type,
+ u32 *cache);
int complete_hdr_size;
struct scsi_host_template *sht;
- const struct hisi_sas_debugfs_reg *debugfs_reg_global;
+ const struct hisi_sas_debugfs_reg *debugfs_reg_array[DEBUGFS_REGS_NUM];
const struct hisi_sas_debugfs_reg *debugfs_reg_port;
};
@@ -371,16 +392,28 @@ struct hisi_hba {
int cq_nvecs;
unsigned int *reply_map;
+ /* bist */
+ enum sas_linkrate debugfs_bist_linkrate;
+ int debugfs_bist_code_mode;
+ int debugfs_bist_phy_no;
+ int debugfs_bist_mode;
+ u32 debugfs_bist_cnt;
+ int debugfs_bist_enable;
+
/* debugfs memories */
- u32 *debugfs_global_reg;
+ /* Put Global AXI and RAS Register into register array */
+ u32 *debugfs_regs[DEBUGFS_REGS_NUM];
u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS];
void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES];
struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES];
struct hisi_sas_iost *debugfs_iost;
struct hisi_sas_itct *debugfs_itct;
+ u64 *debugfs_iost_cache;
+ u64 *debugfs_itct_cache;
struct dentry *debugfs_dir;
struct dentry *debugfs_dump_dentry;
+ struct dentry *debugfs_bist_dentry;
bool debugfs_snapshot;
};
@@ -533,7 +566,6 @@ extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
-extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag);
extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba);
extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
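With the constants introduced above, the IPTT tag space splits a fixed pool of HISI_SAS_MAX_COMMANDS entries: tags below HISI_SAS_UNRESERVED_IPTT are managed by the block layer, while the top HISI_SAS_RESERVED_IPTT tags come from the driver's private bitmap. A compressed sketch of that boundary test, with stand-in values rather than the real header constants:

/* Stand-in values; the real ones come from hisi_sas.h. */
#define EX_MAX_COMMANDS		4096
#define EX_RESERVED_IPTT	96
#define EX_UNRESERVED_IPTT	(EX_MAX_COMMANDS - EX_RESERVED_IPTT)

/* Tags at or above the boundary belong to the driver-private pool. */
static int example_tag_is_reserved(int slot_idx)
{
	return slot_idx >= EX_UNRESERVED_IPTT;
}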
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index cb746cfc2fa8..d1513fdf1e00 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -118,21 +118,6 @@ void hisi_sas_sata_done(struct sas_task *task,
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
-int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
-{
- struct ata_queued_cmd *qc = task->uldd_task;
-
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
- *tag = qc->tag;
- return 1;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
-
/*
* This function assumes linkrate mask fits in 8 bits, which it
* does for all HW versions supported.
@@ -180,8 +165,8 @@ static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
unsigned long flags;
- if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
- hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
+ if (hisi_hba->hw->slot_index_alloc ||
+ slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_clear(hisi_hba, slot_idx);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -211,8 +196,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
if (index >= hisi_hba->slot_index_count) {
index = find_next_zero_bit(bitmap,
hisi_hba->slot_index_count,
- hisi_hba->hw->max_command_entries -
- HISI_SAS_RESERVED_IPTT_CNT);
+ HISI_SAS_UNRESERVED_IPTT);
if (index >= hisi_hba->slot_index_count) {
spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -SAS_QUEUE_FULL;
@@ -301,7 +285,7 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
struct sas_task *task, int n_elem,
- int n_elem_req, int n_elem_resp)
+ int n_elem_req)
{
struct device *dev = hisi_hba->dev;
@@ -315,16 +299,13 @@ static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
if (n_elem_req)
dma_unmap_sg(dev, &task->smp_task.smp_req,
1, DMA_TO_DEVICE);
- if (n_elem_resp)
- dma_unmap_sg(dev, &task->smp_task.smp_resp,
- 1, DMA_FROM_DEVICE);
}
}
}
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
struct sas_task *task, int *n_elem,
- int *n_elem_req, int *n_elem_resp)
+ int *n_elem_req)
{
struct device *dev = hisi_hba->dev;
int rc;
@@ -332,7 +313,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
if (sas_protocol_ata(task->task_proto)) {
*n_elem = task->num_scatter;
} else {
- unsigned int req_len, resp_len;
+ unsigned int req_len;
if (task->num_scatter) {
*n_elem = dma_map_sg(dev, task->scatter,
@@ -353,17 +334,6 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
rc = -EINVAL;
goto err_out_dma_unmap;
}
- *n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
- 1, DMA_FROM_DEVICE);
- if (!*n_elem_resp) {
- rc = -ENOMEM;
- goto err_out_dma_unmap;
- }
- resp_len = sg_dma_len(&task->smp_task.smp_resp);
- if (resp_len & 0x3) {
- rc = -EINVAL;
- goto err_out_dma_unmap;
- }
}
}
@@ -378,7 +348,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
err_out_dma_unmap:
/* It would be better to call dma_unmap_sg() here, but it's messy */
hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
- *n_elem_req, *n_elem_resp);
+ *n_elem_req);
prep_out:
return rc;
}
@@ -450,7 +420,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
struct asd_sas_port *sas_port = device->port;
struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
- int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0;
+ int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
struct hisi_sas_dq *dq;
unsigned long flags;
int wr_q_index;
@@ -486,7 +456,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
}
rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
- &n_elem_req, &n_elem_resp);
+ &n_elem_req);
if (rc < 0)
goto prep_out;
@@ -520,13 +490,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
slot = &hisi_hba->slot_info[slot_idx];
spin_lock_irqsave(&dq->lock, flags);
- wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
- if (wr_q_index < 0) {
- spin_unlock_irqrestore(&dq->lock, flags);
- rc = -EAGAIN;
- goto err_out_tag;
- }
-
+ wr_q_index = dq->wr_point;
+ dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
spin_unlock_irqrestore(&dq->lock, flags);
spin_lock_irqsave(&sas_dev->lock, flags);
@@ -551,7 +516,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
- memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
+ memset(hisi_sas_status_buf_addr_mem(slot), 0,
+ sizeof(struct hisi_sas_err_record));
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
@@ -580,14 +546,12 @@ static int hisi_sas_task_prep(struct sas_task *task,
return 0;
-err_out_tag:
- hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out_dif_dma_unmap:
if (!sas_protocol_ata(task->task_proto))
hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
hisi_sas_dma_unmap(hisi_hba, task, n_elem,
- n_elem_req, n_elem_resp);
+ n_elem_req);
prep_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
return rc;
@@ -719,13 +683,13 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
return sas_dev;
}
-#define HISI_SAS_SRST_ATA_DISK_CNT 3
+#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
int rc = TMF_RESP_FUNC_COMPLETE;
struct scsi_lun lun;
struct hisi_sas_tmf_task tmf_task;
- int retry = HISI_SAS_SRST_ATA_DISK_CNT;
+ int retry = HISI_SAS_DISK_RECOVER_CNT;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
struct sas_phy *local_phy;
@@ -735,10 +699,14 @@ static int hisi_sas_init_device(struct domain_device *device)
int_to_scsilun(0, &lun);
tmf_task.tmf = TMF_CLEAR_TASK_SET;
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
- &tmf_task);
- if (rc == TMF_RESP_FUNC_COMPLETE)
- hisi_sas_release_task(hisi_hba, device);
+ while (retry-- > 0) {
+ rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
+ &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ hisi_sas_release_task(hisi_hba, device);
+ break;
+ }
+ }
break;
case SAS_SATA_DEV:
case SAS_SATA_PM:
@@ -1081,21 +1049,22 @@ static void hisi_sas_dev_gone(struct domain_device *device)
dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
+ down(&hisi_hba->sem);
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
hisi_sas_dereg_device(hisi_hba, device);
- down(&hisi_hba->sem);
hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
- up(&hisi_hba->sem);
device->lldd_dev = NULL;
}
if (hisi_hba->hw->free_device)
hisi_hba->hw->free_device(sas_dev);
sas_dev->dev_type = SAS_PHY_UNUSED;
+ sas_dev->sas_device = NULL;
+ up(&hisi_hba->sem);
}
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
@@ -1423,8 +1392,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
}
}
-static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
- u32 state)
+static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct asd_sas_port *_sas_port = NULL;
@@ -1576,16 +1544,16 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
msleep(1000);
hisi_sas_refresh_port_id(hisi_hba);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
- up(&hisi_hba->sem);
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
+ up(&hisi_hba->sem);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
state = hisi_hba->hw->get_phys_state(hisi_hba);
- hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
+ hisi_sas_rescan_topology(hisi_hba, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
@@ -1770,24 +1738,34 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
- struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
- struct hisi_sas_phy *phy = container_of(sas_phy,
- struct hisi_sas_phy, sas_phy);
DECLARE_COMPLETION_ONSTACK(phyreset);
int rc, reset_type;
+ if (!local_phy->enabled) {
+ sas_put_local_phy(local_phy);
+ return -ENODEV;
+ }
+
if (scsi_is_sas_phy_local(local_phy)) {
+ struct asd_sas_phy *sas_phy =
+ sas_ha->sas_phy[local_phy->number];
+ struct hisi_sas_phy *phy =
+ container_of(sas_phy, struct hisi_sas_phy, sas_phy);
phy->in_reset = 1;
phy->reset_completion = &phyreset;
}
reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
- !dev_is_sata(device)) ? 1 : 0;
+ !dev_is_sata(device)) ? true : false;
rc = sas_phy_reset(local_phy, reset_type);
sas_put_local_phy(local_phy);
if (scsi_is_sas_phy_local(local_phy)) {
+ struct asd_sas_phy *sas_phy =
+ sas_ha->sas_phy[local_phy->number];
+ struct hisi_sas_phy *phy =
+ container_of(sas_phy, struct hisi_sas_phy, sas_phy);
int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
unsigned long flags;
@@ -1802,9 +1780,10 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
/*
* If in init state, we rely on caller to wait for link to be
- * ready; otherwise, delay.
+ * ready; otherwise, delay unless the PHY reset failed.
*/
- msleep(2000);
+ if (!rc)
+ msleep(2000);
}
return rc;
@@ -1845,21 +1824,21 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
+ /* Clear internal IO, then issue the LU reset */
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
+ goto out;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
+
if (dev_is_sata(device)) {
struct sas_phy *phy;
- /* Clear internal IO and then hardreset */
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
- if (rc < 0) {
- dev_err(dev, "lu_reset: internal abort failed\n");
- goto out;
- }
- hisi_sas_dereg_device(hisi_hba, device);
-
phy = sas_get_local_phy(device);
- rc = sas_phy_reset(phy, 1);
+ rc = sas_phy_reset(phy, true);
if (rc == 0)
hisi_sas_release_task(hisi_hba, device);
@@ -1867,14 +1846,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
} else {
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
- if (rc < 0) {
- dev_err(dev, "lu_reset: internal abort failed\n");
- goto out;
- }
- hisi_sas_dereg_device(hisi_hba, device);
-
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
@@ -1964,7 +1935,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags, flags_dq = 0;
+ unsigned long flags;
int wr_q_index;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
@@ -1983,15 +1954,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
- spin_lock_irqsave(&dq->lock, flags_dq);
- wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
- if (wr_q_index < 0) {
- spin_unlock_irqrestore(&dq->lock, flags_dq);
- rc = -EAGAIN;
- goto err_out_tag;
- }
+ spin_lock_irqsave(&dq->lock, flags);
+ wr_q_index = dq->wr_point;
+ dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
list_add_tail(&slot->delivery, &dq->list);
- spin_unlock_irqrestore(&dq->lock, flags_dq);
+ spin_unlock_irqrestore(&dq->lock, flags);
spin_lock_irqsave(&sas_dev->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
spin_unlock_irqrestore(&sas_dev->lock, flags);
@@ -2012,7 +1979,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
- memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
+ memset(hisi_sas_status_buf_addr_mem(slot), 0,
+ sizeof(struct hisi_sas_err_record));
hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
abort_flag, task_tag);
@@ -2028,8 +1996,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
return 0;
-err_out_tag:
- hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
@@ -2089,6 +2055,9 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
/* Internal abort timed out */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
+ queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
+
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
@@ -2123,7 +2092,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
}
exit:
- dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
+ dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
SAS_ADDR(device->sas_addr), task,
task->task_status.resp, /* 0 is complete, -1 is undelivered */
task->task_status.stat);
@@ -2291,7 +2260,7 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
- int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries;
+ int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -2328,7 +2297,7 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
- int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
+ int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
int max_command_entries_ru, sz_slot_buf_ru;
int blk_cnt, slots_per_blk;
@@ -2379,7 +2348,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL);
if (!hisi_hba->itct)
goto err_out;
@@ -2396,7 +2365,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
else
sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
- s = lcm(max_command_entries_ru, sz_slot_buf_ru);
+ s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
slots_per_blk = s / sz_slot_buf_ru;
@@ -2406,7 +2375,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
void *buf;
buf = dmam_alloc_coherent(dev, s, &buf_dma,
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL);
if (!buf)
goto err_out;
@@ -2455,11 +2424,9 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
goto err_out;
- hisi_sas_init_mem(hisi_hba);
hisi_sas_slot_index_init(hisi_hba);
- hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
- HISI_SAS_RESERVED_IPTT_CNT;
+ hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
if (!hisi_hba->wq) {
@@ -2610,8 +2577,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hisi_hba->regs = devm_ioremap_resource(dev, res);
+ hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hisi_hba->regs))
goto err_out;
@@ -2672,13 +2638,11 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_channel = 1;
shost->max_cmd_len = 16;
if (hisi_hba->hw->slot_index_alloc) {
- shost->can_queue = hisi_hba->hw->max_command_entries;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ shost->can_queue = HISI_SAS_MAX_COMMANDS;
+ shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
} else {
- shost->can_queue = hisi_hba->hw->max_command_entries -
- HISI_SAS_RESERVED_IPTT_CNT;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
- HISI_SAS_RESERVED_IPTT_CNT;
+ shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
+ shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
}
sha->sas_ha_name = DRV_NAME;
@@ -2769,21 +2733,52 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
- u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg;
+ u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_GLOBAL];
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
const struct hisi_sas_debugfs_reg *global =
- hisi_hba->hw->debugfs_reg_global;
+ hw->debugfs_reg_array[DEBUGFS_GLOBAL];
int i;
for (i = 0; i < global->count; i++, databuf++)
*databuf = global->read_global_reg(hisi_hba, 4 * i);
}
+static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
+{
+ u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_AXI];
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ const struct hisi_sas_debugfs_reg *axi =
+ hw->debugfs_reg_array[DEBUGFS_AXI];
+ int i;
+
+ for (i = 0; i < axi->count; i++, databuf++)
+ *databuf = axi->read_global_reg(hisi_hba,
+ 4 * i + axi->base_off);
+}
+
+static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
+{
+ u32 *databuf = hisi_hba->debugfs_regs[DEBUGFS_RAS];
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ const struct hisi_sas_debugfs_reg *ras =
+ hw->debugfs_reg_array[DEBUGFS_RAS];
+ int i;
+
+ for (i = 0; i < ras->count; i++, databuf++)
+ *databuf = ras->read_global_reg(hisi_hba,
+ 4 * i + ras->base_off);
+}
+
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
+ void *cachebuf = hisi_hba->debugfs_itct_cache;
void *databuf = hisi_hba->debugfs_itct;
struct hisi_sas_itct *itct;
int i;
+ hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE,
+ cachebuf);
+
itct = hisi_hba->itct;
for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
@@ -2794,11 +2789,15 @@ static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
- int max_command_entries = hisi_hba->hw->max_command_entries;
+ int max_command_entries = HISI_SAS_MAX_COMMANDS;
+ void *cachebuf = hisi_hba->debugfs_iost_cache;
void *databuf = hisi_hba->debugfs_iost;
struct hisi_sas_iost *iost;
int i;
+ hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE,
+ cachebuf);
+
iost = hisi_hba->iost;
for (i = 0; i < max_command_entries; i++, iost++) {
@@ -2845,9 +2844,9 @@ static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
struct hisi_hba *hisi_hba = s->private;
const struct hisi_sas_hw *hw = hisi_hba->hw;
- const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global;
+ const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];
- hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg,
+ hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_GLOBAL],
reg_global, s);
return 0;
@@ -2867,6 +2866,58 @@ static const struct file_operations hisi_sas_debugfs_global_fops = {
.owner = THIS_MODULE,
};
+static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];
+
+ hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_AXI],
+ reg_axi, s);
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_axi_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_axi_fops = {
+ .open = hisi_sas_debugfs_axi_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];
+
+ hisi_sas_debugfs_print_reg(hisi_hba->debugfs_regs[DEBUGFS_RAS],
+ reg_ras, s);
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_ras_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_ras_fops = {
+ .open = hisi_sas_debugfs_ras_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
struct hisi_sas_phy *phy = s->private;
@@ -2893,8 +2944,8 @@ static const struct file_operations hisi_sas_debugfs_port_fops = {
.owner = THIS_MODULE,
};
-static int hisi_sas_show_row_64(struct seq_file *s, int index,
- int sz, __le64 *ptr)
+static void hisi_sas_show_row_64(struct seq_file *s, int index,
+ int sz, __le64 *ptr)
{
int i;
@@ -2907,12 +2958,10 @@ static int hisi_sas_show_row_64(struct seq_file *s, int index,
}
seq_puts(s, "\n");
-
- return 0;
}
-static int hisi_sas_show_row_32(struct seq_file *s, int index,
- int sz, __le32 *ptr)
+static void hisi_sas_show_row_32(struct seq_file *s, int index,
+ int sz, __le32 *ptr)
{
int i;
@@ -2924,11 +2973,9 @@ static int hisi_sas_show_row_32(struct seq_file *s, int index,
seq_puts(s, "\n\t");
}
seq_puts(s, "\n");
-
- return 0;
}
-static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
+static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
{
struct hisi_sas_cq *cq = cq_ptr;
struct hisi_hba *hisi_hba = cq->hisi_hba;
@@ -2936,20 +2983,18 @@ static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
__le32 *complete_hdr = complete_queue +
(hisi_hba->hw->complete_hdr_size * slot);
- return hisi_sas_show_row_32(s, slot,
- hisi_hba->hw->complete_hdr_size,
- complete_hdr);
+ hisi_sas_show_row_32(s, slot,
+ hisi_hba->hw->complete_hdr_size,
+ complete_hdr);
}
static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
struct hisi_sas_cq *cq = s->private;
- int slot, ret;
+ int slot;
for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- ret = hisi_sas_cq_show_slot(s, slot, cq);
- if (ret)
- return ret;
+ hisi_sas_cq_show_slot(s, slot, cq);
}
return 0;
}
@@ -2967,7 +3012,7 @@ static const struct file_operations hisi_sas_debugfs_cq_fops = {
.owner = THIS_MODULE,
};
-static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
+static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
struct hisi_sas_dq *dq = dq_ptr;
struct hisi_hba *hisi_hba = dq->hisi_hba;
@@ -2975,18 +3020,15 @@ static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
__le32 *cmd_hdr = cmd_queue +
sizeof(struct hisi_sas_cmd_hdr) * slot;
- return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr),
- cmd_hdr);
+ hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr);
}
static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
{
- int slot, ret;
+ int slot;
for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
- ret = hisi_sas_dq_show_slot(s, slot, s->private);
- if (ret)
- return ret;
+ hisi_sas_dq_show_slot(s, slot, s->private);
}
return 0;
}
@@ -3008,14 +3050,12 @@ static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
struct hisi_hba *hisi_hba = s->private;
struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost;
- int i, ret, max_command_entries = hisi_hba->hw->max_command_entries;
- __le64 *iost = &debugfs_iost->qw0;
+ int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
for (i = 0; i < max_command_entries; i++, debugfs_iost++) {
- ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost),
- iost);
- if (ret)
- return ret;
+ __le64 *iost = &debugfs_iost->qw0;
+
+ hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), iost);
}
return 0;
@@ -3034,18 +3074,56 @@ static const struct file_operations hisi_sas_debugfs_iost_fops = {
.owner = THIS_MODULE,
};
+static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_iost_itct_cache *iost_cache =
+ (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_iost_cache;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ int i, tab_idx;
+ __le64 *iost;
+
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
+ /*
+ * Data struct of IOST cache:
+ * Data[1]: BIT0~15: Table index
+ * Bit16: Valid mask
+ * Data[2]~[9]: IOST table
+ */
+ tab_idx = (iost_cache->data[1] & 0xffff);
+ iost = (__le64 *)iost_cache;
+
+ hisi_sas_show_row_64(s, tab_idx, cache_size, iost);
+ }
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_iost_cache_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_iost_cache_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
+ .open = hisi_sas_debugfs_iost_cache_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
- int i, ret;
+ int i;
struct hisi_hba *hisi_hba = s->private;
struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct;
- __le64 *itct = &debugfs_itct->qw0;
for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) {
- ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct),
- itct);
- if (ret)
- return ret;
+ __le64 *itct = &debugfs_itct->qw0;
+
+ hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), itct);
}
return 0;
@@ -3064,6 +3142,46 @@ static const struct file_operations hisi_sas_debugfs_itct_fops = {
.owner = THIS_MODULE,
};
+static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ struct hisi_sas_iost_itct_cache *itct_cache =
+ (struct hisi_sas_iost_itct_cache *)hisi_hba->debugfs_itct_cache;
+ u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
+ int i, tab_idx;
+ __le64 *itct;
+
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
+ /*
+ * Data struct of ITCT cache:
+ * Data[1]: BIT0~15: Table index
+ * Bit16: Valid mask
+ * Data[2]~[9]: ITCT table
+ */
+ tab_idx = itct_cache->data[1] & 0xffff;
+ itct = (__le64 *)itct_cache;
+
+ hisi_sas_show_row_64(s, tab_idx, cache_size, itct);
+ }
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_itct_cache_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_itct_cache_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
+ .open = hisi_sas_debugfs_itct_cache_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
struct dentry *dump_dentry;
@@ -3110,9 +3228,21 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
debugfs_create_file("iost", 0400, dump_dentry, hisi_hba,
&hisi_sas_debugfs_iost_fops);
+ debugfs_create_file("iost_cache", 0400, dump_dentry, hisi_hba,
+ &hisi_sas_debugfs_iost_cache_fops);
+
debugfs_create_file("itct", 0400, dump_dentry, hisi_hba,
&hisi_sas_debugfs_itct_fops);
+ debugfs_create_file("itct_cache", 0400, dump_dentry, hisi_hba,
+ &hisi_sas_debugfs_itct_cache_fops);
+
+ debugfs_create_file("axi", 0400, dump_dentry, hisi_hba,
+ &hisi_sas_debugfs_axi_fops);
+
+ debugfs_create_file("ras", 0400, dump_dentry, hisi_hba,
+ &hisi_sas_debugfs_ras_fops);
+
return;
}
@@ -3122,6 +3252,8 @@ static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
+ hisi_sas_debugfs_snapshot_axi_reg(hisi_hba);
+ hisi_sas_debugfs_snapshot_ras_reg(hisi_hba);
hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
@@ -3162,6 +3294,382 @@ static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
.owner = THIS_MODULE,
};
+enum {
+ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
+ HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
+ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
+};
+
+enum {
+ HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
+ HISI_SAS_BIST_CODE_MODE_PRBS23,
+ HISI_SAS_BIST_CODE_MODE_PRBS31,
+ HISI_SAS_BIST_CODE_MODE_JTPAT,
+ HISI_SAS_BIST_CODE_MODE_CJTPAT,
+ HISI_SAS_BIST_CODE_MODE_SCRAMBLED_0,
+ HISI_SAS_BIST_CODE_MODE_TRAIN,
+ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
+ HISI_SAS_BIST_CODE_MODE_HFTP,
+ HISI_SAS_BIST_CODE_MODE_MFTP,
+ HISI_SAS_BIST_CODE_MODE_LFTP,
+ HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
+};
+
+static const struct {
+ int value;
+ char *name;
+} hisi_sas_debugfs_loop_linkrate[] = {
+ { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
+ { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
+ { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
+ { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
+};
+
+static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
+ int match = (hisi_hba->debugfs_bist_linkrate ==
+ hisi_sas_debugfs_loop_linkrate[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ hisi_sas_debugfs_loop_linkrate[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EOVERFLOW;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
+ if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name,
+ pkbuf, 16)) {
+ hisi_hba->debugfs_bist_linkrate =
+ hisi_sas_debugfs_loop_linkrate[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_bist_linkrate_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = {
+ .open = hisi_sas_debugfs_bist_linkrate_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_bist_linkrate_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
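The link_rate file above is the first of three attributes that share one debugfs idiom: the show side prints every option and brackets the active one, while the store side strips the user buffer and matches it back against the same table. A condensed userspace sketch of the pattern (table contents and names are illustrative, and the input is assumed to be pre-stripped where the driver uses strstrip()):

#include <stdio.h>
#include <string.h>

static const struct { int value; const char *name; } options[] = {
	{ 8, "1.5 Gbit" }, { 9, "3.0 Gbit" }, { 10, "6.0 Gbit" },
};

static int current_val = 9;

/* show(): print all options, bracketing the selected one */
static void show(void)
{
	size_t i;

	for (i = 0; i < sizeof(options) / sizeof(options[0]); i++)
		printf("%s%s%s ",
		       current_val == options[i].value ? "[" : "",
		       options[i].name,
		       current_val == options[i].value ? "]" : "");
	printf("\n");
}

/* store(): match a stripped user string against the table */
static int store(const char *buf)
{
	size_t i;

	for (i = 0; i < sizeof(options) / sizeof(options[0]); i++) {
		if (!strcmp(options[i].name, buf)) {
			current_val = options[i].value;
			return 0;
		}
	}
	return -1; /* the driver returns -EINVAL here */
}

int main(void)
{
	show();
	if (!store("6.0 Gbit"))
		show();
	return 0;
}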
+static const struct {
+ int value;
+ char *name;
+} hisi_sas_debugfs_loop_code_mode[] = {
+ { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
+ { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
+ { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
+ { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
+ { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
+ { HISI_SAS_BIST_CODE_MODE_SCRAMBLED_0, "SCRAMBLED_0" },
+ { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
+ { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
+ { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
+ { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
+ { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
+ { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
+};
+
+static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
+ int match = (hisi_hba->debugfs_bist_code_mode ==
+ hisi_sas_debugfs_loop_code_mode[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ hisi_sas_debugfs_loop_code_mode[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EOVERFLOW;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
+ if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name,
+ pkbuf, 16)) {
+ hisi_hba->debugfs_bist_code_mode =
+ hisi_sas_debugfs_loop_code_mode[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_bist_code_mode_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = {
+ .open = hisi_sas_debugfs_bist_code_mode_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_bist_code_mode_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int phy_no;
+ int val;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ val = kstrtouint_from_user(buf, count, 0, &phy_no);
+ if (val)
+ return val;
+
+ if (phy_no >= hisi_hba->n_phy)
+ return -EINVAL;
+
+ hisi_hba->debugfs_bist_phy_no = phy_no;
+
+ return count;
+}
+
+static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_bist_phy_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_bist_phy_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_phy_ops = {
+ .open = hisi_sas_debugfs_bist_phy_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_bist_phy_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static const struct {
+ int value;
+ char *name;
+} hisi_sas_debugfs_loop_modes[] = {
+ { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
+ { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
+};
+
+static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
+ int match = (hisi_hba->debugfs_bist_mode ==
+ hisi_sas_debugfs_loop_modes[i].value);
+
+ seq_printf(s, "%s%s%s ", match ? "[" : "",
+ hisi_sas_debugfs_loop_modes[i].name,
+ match ? "]" : "");
+ }
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ char kbuf[16] = {}, *pkbuf;
+ bool found = false;
+ int i;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ if (count >= sizeof(kbuf))
+ return -EOVERFLOW;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ pkbuf = strstrip(kbuf);
+
+ for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
+ if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) {
+ hisi_hba->debugfs_bist_mode =
+ hisi_sas_debugfs_loop_modes[i].value;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return count;
+}
+
+static int hisi_sas_debugfs_bist_mode_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_bist_mode_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_mode_ops = {
+ .open = hisi_sas_debugfs_bist_mode_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_bist_mode_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int enable;
+ int val;
+
+ val = kstrtouint_from_user(buf, count, 0, &enable);
+ if (val)
+ return val;
+
+ if (enable > 1)
+ return -EINVAL;
+
+ if (enable == hisi_hba->debugfs_bist_enable)
+ return count;
+
+ if (!hisi_hba->hw->set_bist)
+ return -EPERM;
+
+ val = hisi_hba->hw->set_bist(hisi_hba, enable);
+ if (val < 0)
+ return val;
+
+ hisi_hba->debugfs_bist_enable = enable;
+
+ return count;
+}
+
+static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_bist_enable_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_bist_enable_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
+ .open = hisi_sas_debugfs_bist_enable_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_bist_enable_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
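Taken together, the files above give a self-contained BIST workflow: configure link_rate, code_mode, loopback_mode, and phy_id while the test is off (each configuration write handler returns -EPERM once enable is set), then toggle enable, and read the accumulated error count back from cnt. A rough userspace sketch of that sequence; the debugfs mount point and the device directory name below are assumptions, not taken from the driver:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumed path: the driver creates <debugfs>/hisi_sas/<dev>/bist/ */
#define BIST_DIR "/sys/kernel/debug/hisi_sas/0000:74:02.0/bist/"

static int write_attr(const char *name, const char *val)
{
	char path[256];
	int fd, ret;

	snprintf(path, sizeof(path), BIST_DIR "%s", name);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}

int main(void)
{
	/* Configure before enabling: writes fail with EPERM once enabled */
	write_attr("link_rate", "6.0 Gbit");
	write_attr("code_mode", "PRBS31");
	write_attr("loopback_mode", "serdes");
	write_attr("phy_id", "0");
	write_attr("enable", "1");	/* start the test */
	sleep(1);
	write_attr("enable", "0");	/* stop; errors accumulate in "cnt" */
	return 0;
}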
void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
@@ -3175,89 +3683,165 @@ void hisi_sas_debugfs_work_handler(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
-void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
+void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
{
- int max_command_entries = hisi_hba->hw->max_command_entries;
struct device *dev = hisi_hba->dev;
- int p, i, c, d;
+ int i;
+
+ devm_kfree(dev, hisi_hba->debugfs_iost_cache);
+ devm_kfree(dev, hisi_hba->debugfs_itct_cache);
+ devm_kfree(dev, hisi_hba->debugfs_iost);
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
+
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
+
+ for (i = 0; i < DEBUGFS_REGS_NUM; i++)
+ devm_kfree(dev, hisi_hba->debugfs_regs[i]);
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
+}
+
+int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+{
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ struct device *dev = hisi_hba->dev;
+ int p, c, d;
size_t sz;
- hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
- hisi_sas_debugfs_dir);
- debugfs_create_file("trigger_dump", 0600,
- hisi_hba->debugfs_dir,
- hisi_hba,
- &hisi_sas_debugfs_trigger_dump_fops);
+ hisi_hba->debugfs_dump_dentry =
+ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
- /* Alloc buffer for global */
- sz = hisi_hba->hw->debugfs_reg_global->count * 4;
- hisi_hba->debugfs_global_reg =
- devm_kmalloc(dev, sz, GFP_KERNEL);
+ sz = hw->debugfs_reg_array[DEBUGFS_GLOBAL]->count * 4;
+ hisi_hba->debugfs_regs[DEBUGFS_GLOBAL] =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
- if (!hisi_hba->debugfs_global_reg)
- goto fail_global;
+ if (!hisi_hba->debugfs_regs[DEBUGFS_GLOBAL])
+ goto fail;
- /* Alloc buffer for port */
- sz = hisi_hba->hw->debugfs_reg_port->count * 4;
+ sz = hw->debugfs_reg_port->count * 4;
for (p = 0; p < hisi_hba->n_phy; p++) {
hisi_hba->debugfs_port_reg[p] =
devm_kmalloc(dev, sz, GFP_KERNEL);
if (!hisi_hba->debugfs_port_reg[p])
- goto fail_port;
+ goto fail;
}
- /* Alloc buffer for cq */
- sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
+ sz = hw->debugfs_reg_array[DEBUGFS_AXI]->count * 4;
+ hisi_hba->debugfs_regs[DEBUGFS_AXI] =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+
+ if (!hisi_hba->debugfs_regs[DEBUGFS_AXI])
+ goto fail;
+
+ sz = hw->debugfs_reg_array[DEBUGFS_RAS]->count * 4;
+ hisi_hba->debugfs_regs[DEBUGFS_RAS] =
+ devm_kmalloc(dev, sz, GFP_KERNEL);
+
+ if (!hisi_hba->debugfs_regs[DEBUGFS_RAS])
+ goto fail;
+
+ sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
for (c = 0; c < hisi_hba->queue_count; c++) {
hisi_hba->debugfs_complete_hdr[c] =
devm_kmalloc(dev, sz, GFP_KERNEL);
if (!hisi_hba->debugfs_complete_hdr[c])
- goto fail_cq;
+ goto fail;
}
- /* Alloc buffer for dq */
sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
for (d = 0; d < hisi_hba->queue_count; d++) {
hisi_hba->debugfs_cmd_hdr[d] =
devm_kmalloc(dev, sz, GFP_KERNEL);
if (!hisi_hba->debugfs_cmd_hdr[d])
- goto fail_iost_dq;
+ goto fail;
}
- /* Alloc buffer for iost */
- sz = max_command_entries * sizeof(struct hisi_sas_iost);
+ sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);
hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL);
if (!hisi_hba->debugfs_iost)
- goto fail_iost_dq;
+ goto fail;
+
+ sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
+ sizeof(struct hisi_sas_iost_itct_cache);
+
+ hisi_hba->debugfs_iost_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_iost_cache)
+ goto fail;
+
+ sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
+ sizeof(struct hisi_sas_iost_itct_cache);
+
+ hisi_hba->debugfs_itct_cache = devm_kmalloc(dev, sz, GFP_KERNEL);
+ if (!hisi_hba->debugfs_itct_cache)
+ goto fail;
- /* Alloc buffer for itct */
/* New memory allocations must be located before itct */
sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL);
if (!hisi_hba->debugfs_itct)
- goto fail_itct;
+ goto fail;
- return;
-fail_itct:
- devm_kfree(dev, hisi_hba->debugfs_iost);
-fail_iost_dq:
- for (i = 0; i < d; i++)
- devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]);
-fail_cq:
- for (i = 0; i < c; i++)
- devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]);
-fail_port:
- for (i = 0; i < p; i++)
- devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
- devm_kfree(dev, hisi_hba->debugfs_global_reg);
-fail_global:
- debugfs_remove_recursive(hisi_hba->debugfs_dir);
- dev_dbg(dev, "failed to init debugfs!\n");
+ return 0;
+fail:
+ hisi_sas_debugfs_release(hisi_hba);
+ return -ENOMEM;
+}
+
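The rewritten allocator above gets away with a single fail: label because every buffer is device-managed: on error it simply calls the release helper, which devm_kfree()s whatever this pass (or an earlier successful one) handed out. A stripped-down sketch of the same shape, assuming kernel context; the helper name is illustrative:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Allocate n device-managed buffers; on failure, unwind only what this
 * loop managed to allocate, all through one label.
 */
static int alloc_buffers(struct device *dev, void **bufs, int n, size_t sz)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!bufs[i])
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		devm_kfree(dev, bufs[i]);
	return -ENOMEM;
}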
+void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
+{
+ hisi_hba->debugfs_bist_dentry =
+ debugfs_create_dir("bist", hisi_hba->debugfs_dir);
+ debugfs_create_file("link_rate", 0600,
+ hisi_hba->debugfs_bist_dentry, hisi_hba,
+ &hisi_sas_debugfs_bist_linkrate_ops);
+
+ debugfs_create_file("code_mode", 0600,
+ hisi_hba->debugfs_bist_dentry, hisi_hba,
+ &hisi_sas_debugfs_bist_code_mode_ops);
+
+ debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
+
+ debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_cnt);
+
+ debugfs_create_file("loopback_mode", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &hisi_sas_debugfs_bist_mode_ops);
+
+ debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
+
+ hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+}
+
+void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+
+ hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
+ hisi_sas_debugfs_dir);
+ debugfs_create_file("trigger_dump", 0600,
+ hisi_hba->debugfs_dir,
+ hisi_hba,
+ &hisi_sas_debugfs_trigger_dump_fops);
+
+ /* create BIST debugfs files */
+ hisi_sas_debugfs_bist_init(hisi_hba);
+
+ if (hisi_sas_debugfs_alloc(hisi_hba)) {
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+ dev_dbg(dev, "failed to init debugfs!\n");
+ }
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3912216e8a4f..b861a0f14c9d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -401,8 +401,6 @@ enum {
TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */
};
-#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192
-
#define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS)
#define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES)
#define HISI_SAS_FATAL_INT_NR (2)
@@ -418,13 +416,6 @@ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
return readl(regs);
}
-static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
-{
- void __iomem *regs = hisi_hba->regs + off;
-
- return readl_relaxed(regs);
-}
-
static void hisi_sas_write32(struct hisi_hba *hisi_hba,
u32 off, u32 val)
{
@@ -866,30 +857,6 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
return bitmap;
}
-/*
- * The callpath to this function and upto writing the write
- * queue pointer should be safe from interruption.
- */
-static int
-get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
-{
- struct device *dev = hisi_hba->dev;
- int queue = dq->id;
- u32 r, w;
-
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- dev_warn(dev, "could not find free slot\n");
- return -EAGAIN;
- }
-
- dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
-
- return w;
-}
-
/* DQ lock must be taken here */
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
@@ -1308,21 +1275,17 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
}
case SAS_PROTOCOL_SMP:
{
- void *to;
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+ void *to = page_address(sg_page(sg_resp));
ts->stat = SAM_STAT_GOOD;
- to = kmap_atomic(sg_page(sg_resp));
- dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
- DMA_FROM_DEVICE);
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
- sg_dma_len(sg_resp));
- kunmap_atomic(to);
+ sg_resp->length);
break;
}
case SAS_PROTOCOL_SATA:
@@ -1534,11 +1497,9 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
struct hisi_sas_complete_v1_hdr *complete_queue =
(struct hisi_sas_complete_v1_hdr *)
hisi_hba->complete_hdr[queue];
- u32 irq_value, rd_point = cq->rd_point, wr_point;
+ u32 rd_point = cq->rd_point, wr_point;
spin_lock(&hisi_hba->lock);
- irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
-
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
wr_point = hisi_sas_read32(hisi_hba,
COMPL_Q_0_WR_PTR + (0x14 * queue));
@@ -1820,9 +1781,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.clear_itct = clear_itct_v1_hw,
.prep_smp = prep_smp_v1_hw,
.prep_ssp = prep_ssp_v1_hw,
- .get_free_slot = get_free_slot_v1_hw,
.start_delivery = start_delivery_v1_hw,
- .slot_complete = slot_complete_v1_hw,
.phys_init = phys_init_v1_hw,
.phy_start = start_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
@@ -1830,7 +1789,6 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.phy_set_linkrate = phy_set_linkrate_v1_hw,
.phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
- .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
.sht = &sht_v1_hw,
};
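The removed get_free_slot_v1_hw() (and its v2/v3 twins further down) all encoded the same ring test: a delivery queue is full when the hardware read pointer sits one slot ahead of the driver write pointer, modulo the queue depth. A standalone sketch of that arithmetic; the depth value here is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SLOTS 512 /* stands in for HISI_SAS_QUEUE_SLOTS */

/* Full when advancing the write pointer would land on the read pointer;
 * this sacrifices one slot so that full and empty are distinguishable.
 */
static bool queue_full(unsigned int r, unsigned int w)
{
	return r == (w + 1) % QUEUE_SLOTS;
}

int main(void)
{
	printf("r=5 w=4 full? %d\n", queue_full(5, 4)); /* 1: full */
	printf("r=5 w=5 full? %d\n", queue_full(5, 5)); /* 0: empty */
	return 0;
}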
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e9b15d45f98f..8e96a257e439 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1637,31 +1637,6 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
return bitmap;
}
-/*
- * The callpath to this function and upto writing the write
- * queue pointer should be safe from interruption.
- */
-static int
-get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
-{
- struct device *dev = hisi_hba->dev;
- int queue = dq->id;
- u32 r, w;
-
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- dev_warn(dev, "full queue=%d r=%d w=%d\n",
- queue, r, w);
- return -EAGAIN;
- }
-
- dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
-
- return w;
-}
-
/* DQ lock must be taken here */
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
@@ -2418,7 +2393,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
slot_err_v2_hw(hisi_hba, task, slot, 2);
if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
slot->idx, task, sas_dev->device_id,
complete_hdr->dw0, complete_hdr->dw1,
complete_hdr->act, complete_hdr->dw3,
@@ -2444,20 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
case SAS_PROTOCOL_SMP:
{
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
- void *to;
+ void *to = page_address(sg_page(sg_resp));
ts->stat = SAM_STAT_GOOD;
- to = kmap_atomic(sg_page(sg_resp));
- dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
- DMA_FROM_DEVICE);
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
- sg_dma_len(sg_resp));
- kunmap_atomic(to);
+ sg_resp->length);
break;
}
case SAS_PROTOCOL_SATA:
@@ -2484,7 +2455,7 @@ out:
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- dev_info(dev, "slot complete: task(%p) aborted\n", task);
+ dev_info(dev, "slot complete: task(%pK) aborted\n", task);
return SAS_ABORTED_TASK;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
@@ -2495,7 +2466,7 @@ out:
spin_lock_irqsave(&device->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state)) {
spin_unlock_irqrestore(&device->done_lock, flags);
- dev_info(dev, "slot complete: task(%p) ignored\n",
+ dev_info(dev, "slot complete: task(%pK) ignored\n",
task);
return sts;
}
@@ -2563,7 +2534,10 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
hdr->dw1 = cpu_to_le32(dw1);
/* dw2 */
- if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
+ if (task->ata_task.use_ncq) {
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ hdr_tag = qc->tag;
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
@@ -3333,8 +3307,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
- int irq, rc, irq_map[128];
- int i, phy_no, fatal_no, queue_no, k;
+ int irq, rc = 0, irq_map[128];
+ int i, phy_no, fatal_no, queue_no;
for (i = 0; i < 128; i++)
irq_map[i] = platform_get_irq(pdev, i);
@@ -3347,7 +3321,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
- goto free_phy_int_irqs;
+ goto err_out;
}
}
@@ -3361,7 +3335,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
dev_err(dev, "irq init: could not request sata interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
- goto free_sata_int_irqs;
+ goto err_out;
}
}
@@ -3373,7 +3347,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
- goto free_fatal_int_irqs;
+ goto err_out;
}
}
@@ -3388,34 +3362,14 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
irq, rc);
rc = -ENOENT;
- goto free_cq_int_irqs;
+ goto err_out;
}
tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
}
hisi_hba->cq_nvecs = hisi_hba->queue_count;
- return 0;
-
-free_cq_int_irqs:
- for (k = 0; k < queue_no; k++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[k];
-
- free_irq(irq_map[k + 96], cq);
- tasklet_kill(&cq->tasklet);
- }
-free_fatal_int_irqs:
- for (k = 0; k < fatal_no; k++)
- free_irq(irq_map[k + 81], hisi_hba);
-free_sata_int_irqs:
- for (k = 0; k < phy_no; k++) {
- struct hisi_sas_phy *phy = &hisi_hba->phy[k];
-
- free_irq(irq_map[k + 72], phy);
- }
-free_phy_int_irqs:
- for (k = 0; k < i; k++)
- free_irq(irq_map[k + 1], hisi_hba);
+err_out:
return rc;
}
@@ -3544,8 +3498,8 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
return 0;
}
-static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
- int delay_ms, int timeout_ms)
+static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms)
{
struct device *dev = hisi_hba->dev;
int entries, entries_old = 0, time;
@@ -3559,12 +3513,13 @@ static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba,
msleep(delay_ms);
}
- if (time >= timeout_ms)
- return -ETIMEDOUT;
+ if (time >= timeout_ms) {
+ dev_dbg(dev, "Wait commands complete timeout!\n");
+ return;
+ }
dev_dbg(dev, "wait commands complete %dms\n", time);
- return 0;
}
static struct device_attribute *host_attrs_v2_hw[] = {
@@ -3606,9 +3561,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.prep_ssp = prep_ssp_v2_hw,
.prep_stp = prep_ata_v2_hw,
.prep_abort = prep_abort_v2_hw,
- .get_free_slot = get_free_slot_v2_hw,
.start_delivery = start_delivery_v2_hw,
- .slot_complete = slot_complete_v2_hw,
.phys_init = phys_init_v2_hw,
.phy_start = start_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
@@ -3616,7 +3569,6 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.get_events = phy_get_events_v2_hw,
.phy_set_linkrate = phy_set_linkrate_v2_hw,
.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
- .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
.soft_reset = soft_reset_v2_hw,
.get_phys_state = get_phys_state_v2_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 5f0f6df11adf..cb8d087762db 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -71,6 +71,7 @@
#define HGC_DQE_ECC_MB_ADDR_OFF 16
#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
#define CHNL_INT_STATUS 0x148
+#define TAB_DFX 0x14c
#define HGC_ITCT_ECC_ADDR 0x150
#define HGC_ITCT_ECC_1B_ADDR_OFF 0
#define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \
@@ -83,6 +84,7 @@
#define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
#define FIFO_ERR_INFO_OFF 8
#define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
+#define TAB_RD_TYPE 0x15c
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
@@ -189,12 +191,30 @@
#define PHY_CFG_PHY_RST_OFF 3
#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
+#define CFG_PROG_PHY_LINK_RATE_OFF 8
+#define CFG_PROG_PHY_LINK_RATE_MSK (0xf << CFG_PROG_PHY_LINK_RATE_OFF)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
#define CMD_HDR_PIR_OFF 8
#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
#define SERDES_CFG (PORT_BASE + 0x1c)
+#define CFG_ALOS_CHK_DISABLE_OFF 9
+#define CFG_ALOS_CHK_DISABLE_MSK (0x1 << CFG_ALOS_CHK_DISABLE_OFF)
+#define SAS_PHY_BIST_CTRL (PORT_BASE + 0x2c)
+#define CFG_BIST_MODE_SEL_OFF 0
+#define CFG_BIST_MODE_SEL_MSK (0xf << CFG_BIST_MODE_SEL_OFF)
+#define CFG_LOOP_TEST_MODE_OFF 14
+#define CFG_LOOP_TEST_MODE_MSK (0x3 << CFG_LOOP_TEST_MODE_OFF)
+#define CFG_RX_BIST_EN_OFF 16
+#define CFG_RX_BIST_EN_MSK (0x1 << CFG_RX_BIST_EN_OFF)
+#define CFG_TX_BIST_EN_OFF 17
+#define CFG_TX_BIST_EN_MSK (0x1 << CFG_TX_BIST_EN_OFF)
+#define CFG_BIST_TEST_OFF 18
+#define CFG_BIST_TEST_MSK (0x1 << CFG_BIST_TEST_OFF)
+#define SAS_PHY_BIST_CODE (PORT_BASE + 0x30)
+#define SAS_PHY_BIST_CODE1 (PORT_BASE + 0x34)
+#define SAS_BIST_ERR_CNT (PORT_BASE + 0x38)
#define SL_CFG (PORT_BASE + 0x84)
#define AIP_LIMIT (PORT_BASE + 0x90)
#define SL_CONTROL (PORT_BASE + 0x94)
@@ -499,13 +519,6 @@ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
return readl(regs);
}
-static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
-{
- void __iomem *regs = hisi_hba->regs + off;
-
- return readl_relaxed(regs);
-}
-
static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
{
void __iomem *regs = hisi_hba->regs + off;
@@ -1006,31 +1019,6 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
return bitmap;
}
-/**
- * The callpath to this function and upto writing the write
- * queue pointer should be safe from interruption.
- */
-static int
-get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
-{
- struct device *dev = hisi_hba->dev;
- int queue = dq->id;
- u32 r, w;
-
- w = dq->wr_point;
- r = hisi_sas_read32_relaxed(hisi_hba,
- DLVRY_Q_0_RD_PTR + (queue * 0x14));
- if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
- dev_warn(dev, "full queue=%d r=%d w=%d\n",
- queue, r, w);
- return -EAGAIN;
- }
-
- dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
-
- return w;
-}
-
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
@@ -1386,7 +1374,10 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
hdr->dw1 = cpu_to_le32(dw1);
/* dw2 */
- if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
+ if (task->ata_task.use_ncq) {
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ hdr_tag = qc->tag;
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
@@ -1944,7 +1935,7 @@ static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
u32 irq_value, irq_msk;
irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
- hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
if (irq_value)
@@ -2220,7 +2211,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
slot_err_v3_hw(hisi_hba, task, slot);
if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
slot->idx, task, sas_dev->device_id,
dw0, dw1, complete_hdr->act, dw3,
error_info[0], error_info[1],
@@ -2241,20 +2232,16 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
case SAS_PROTOCOL_SMP: {
struct scatterlist *sg_resp = &task->smp_task.smp_resp;
- void *to;
+ void *to = page_address(sg_page(sg_resp));
ts->stat = SAM_STAT_GOOD;
- to = kmap_atomic(sg_page(sg_resp));
- dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
- DMA_FROM_DEVICE);
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
- sg_dma_len(sg_resp));
- kunmap_atomic(to);
+ sg_resp->length);
break;
}
case SAS_PROTOCOL_SATA:
@@ -2279,7 +2266,7 @@ out:
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- dev_info(dev, "slot complete: task(%p) aborted\n", task);
+ dev_info(dev, "slot complete: task(%pK) aborted\n", task);
return SAS_ABORTED_TASK;
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
@@ -2290,7 +2277,7 @@ out:
spin_lock_irqsave(&device->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state)) {
spin_unlock_irqrestore(&device->done_lock, flags);
- dev_info(dev, "slot complete: task(%p) ignored\n ",
+ dev_info(dev, "slot complete: task(%pK) ignored\n ",
task);
return sts;
}
@@ -2385,8 +2372,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
struct pci_dev *pdev = hisi_hba->pci_dev;
- int vectors, rc;
- int i, k;
+ int vectors, rc, i;
int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
if (auto_affine_msi_experimental) {
@@ -2434,7 +2420,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
rc = -ENOENT;
- goto free_phy_irq;
+ goto free_irq_vectors;
}
rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
@@ -2443,7 +2429,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
if (rc) {
dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
rc = -ENOENT;
- goto free_chnl_interrupt;
+ goto free_irq_vectors;
}
/* Init tasklets for cq only */
@@ -2460,7 +2446,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
i, rc);
rc = -ENOENT;
- goto free_cq_irqs;
+ goto free_irq_vectors;
}
tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
@@ -2468,18 +2454,6 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
return 0;
-free_cq_irqs:
- for (k = 0; k < i; k++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[k];
- int nr = hisi_sas_intr_conv ? 16 : 16 + k;
-
- free_irq(pci_irq_vector(pdev, nr), cq);
- }
- free_irq(pci_irq_vector(pdev, 11), hisi_hba);
-free_chnl_interrupt:
- free_irq(pci_irq_vector(pdev, 2), hisi_hba);
-free_phy_irq:
- free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq_vectors:
pci_free_irq_vectors(pdev);
return rc;
@@ -2620,8 +2594,8 @@ static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
return 0;
}
-static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
- int delay_ms, int timeout_ms)
+static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms)
{
struct device *dev = hisi_hba->dev;
int entries, entries_old = 0, time;
@@ -2635,12 +2609,12 @@ static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
msleep(delay_ms);
}
- if (time >= timeout_ms)
- return -ETIMEDOUT;
+ if (time >= timeout_ms) {
+ dev_dbg(dev, "Wait commands complete timeout!\n");
+ return;
+ }
dev_dbg(dev, "wait commands complete %dms\n", time);
-
- return 0;
}
static ssize_t intr_conv_v3_hw_show(struct device *dev,
@@ -2887,16 +2861,45 @@ static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
.read_global_reg = hisi_sas_read32,
};
+static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = {
+ HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS),
+ HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS),
+ HISI_SAS_DEBUGFS_REG(AXI_CFG),
+ HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR),
+ {}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_axi_reg = {
+ .lu = debugfs_axi_reg_lu,
+ .count = 0x61,
+ .base_off = AXI_MASTER_CFG_BASE,
+ .read_global_reg = hisi_sas_read32,
+};
+
+static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = {
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1),
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK),
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK),
+ HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK),
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2),
+ HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK),
+ {}
+};
+
+static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
+ .lu = debugfs_ras_reg_lu,
+ .count = 0x10,
+ .base_off = RAS_BASE,
+ .read_global_reg = hisi_sas_read32,
+};
+
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = hisi_hba->dev;
-
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
- if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT)
- dev_dbg(dev, "Wait commands complete timeout!\n");
+ wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
hisi_sas_kill_tasklets(hisi_hba);
}
@@ -2909,6 +2912,142 @@ static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
+static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
+ enum hisi_sas_debugfs_cache_type type,
+ u32 *cache)
+{
+ u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ *
+ HISI_SAS_IOST_ITCT_CACHE_NUM;
+ u32 *buf = cache;
+ u32 i, val;
+
+ hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type);
+
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) {
+ val = hisi_sas_read32(hisi_hba, TAB_DFX);
+ if (val == 0xffffffff)
+ break;
+ }
+
+ if (val != 0xffffffff) {
+ pr_err("Issue occur when reading IOST/ITCT cache!\n");
+ return;
+ }
+
+ memset(buf, 0, cache_dw_size * 4);
+ buf[0] = val;
+
+ for (i = 1; i < cache_dw_size; i++)
+ buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX);
+}
+
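read_iost_itct_cache_v3_hw() above drains the whole cache through a single DFX window register, first selecting the table via TAB_RD_TYPE and then scanning for an all-ones sync word before trusting the stream that follows. A condensed sketch of that handshake with the register access stubbed out; the two size constants are assumptions standing in for the driver's macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_DW  10 /* assumed HISI_SAS_IOST_ITCT_CACHE_DW_SZ */
#define ENTRY_NUM 64 /* assumed HISI_SAS_IOST_ITCT_CACHE_NUM */

static uint32_t read_dfx_window(void)
{
	return 0xffffffff; /* stub for hisi_sas_read32(hisi_hba, TAB_DFX) */
}

static int read_cache(uint32_t *buf)
{
	uint32_t val = 0;
	unsigned int i;

	/* Scan up to one entry's worth of dwords for the sync marker */
	for (i = 0; i < ENTRY_DW; i++) {
		val = read_dfx_window();
		if (val == 0xffffffff)
			break;
	}
	if (val != 0xffffffff)
		return -1; /* no marker: abort, as the driver does */

	memset(buf, 0, ENTRY_DW * ENTRY_NUM * sizeof(*buf));
	buf[0] = val;
	for (i = 1; i < ENTRY_DW * ENTRY_NUM; i++)
		buf[i] = read_dfx_window();
	return 0;
}

int main(void)
{
	static uint32_t cache[ENTRY_DW * ENTRY_NUM];

	return read_cache(cache);
}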
+static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
+{
+ u32 reg_val;
+ int phy_id = hisi_hba->debugfs_bist_phy_no;
+
+ /* disable PHY */
+ hisi_sas_phy_enable(hisi_hba, phy_id, 0);
+
+ /* disable ALOS */
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+ reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+}
+
+static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
+{
+ u32 reg_val;
+ int phy_id = hisi_hba->debugfs_bist_phy_no;
+
+ /* disable loopback */
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL);
+ reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
+ CFG_BIST_TEST_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val);
+
+ /* enable ALOS */
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+ reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+
+ /* restore the linkrate */
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE);
+ /* init OOB link rate to 1.5 Gbps */
+ reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
+ reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF);
+ hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val);
+
+ /* enable PHY */
+ hisi_sas_phy_enable(hisi_hba, phy_id, 1);
+}
+
+#define SAS_PHY_BIST_CODE_INIT 0x1
+#define SAS_PHY_BIST_CODE1_INIT 0x80
+static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
+{
+ u32 reg_val, mode_tmp;
+ u32 linkrate = hisi_hba->debugfs_bist_linkrate;
+ u32 phy_id = hisi_hba->debugfs_bist_phy_no;
+ u32 code_mode = hisi_hba->debugfs_bist_code_mode;
+ u32 path_mode = hisi_hba->debugfs_bist_mode;
+ struct device *dev = hisi_hba->dev;
+
+ dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n",
+ linkrate, phy_id, code_mode, path_mode);
+ mode_tmp = path_mode ? 2 : 1;
+ if (enable) {
+ /* some preparations before bist test */
+ hisi_sas_bist_test_prep_v3_hw(hisi_hba);
+
+ /* set linkrate of bist test */
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+ PROG_PHY_LINK_RATE);
+ reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
+ reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF);
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ PROG_PHY_LINK_RATE, reg_val);
+
+ /* set code mode of bist test */
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CTRL);
+ reg_val &= ~(CFG_BIST_MODE_SEL_MSK |
+ CFG_LOOP_TEST_MODE_MSK |
+ CFG_RX_BIST_EN_MSK |
+ CFG_TX_BIST_EN_MSK |
+ CFG_BIST_TEST_MSK);
+ reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
+ (mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
+ CFG_BIST_TEST_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CTRL, reg_val);
+
+ mdelay(100);
+ reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CTRL, reg_val);
+
+ /* set the bist init value */
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CODE,
+ SAS_PHY_BIST_CODE_INIT);
+ hisi_sas_phy_write32(hisi_hba, phy_id,
+ SAS_PHY_BIST_CODE1,
+ SAS_PHY_BIST_CODE1_INIT);
+
+ /* clear error bit */
+ mdelay(100);
+ hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
+ } else {
+ /* disable bist test and recover it */
+ hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
+ phy_id, SAS_BIST_ERR_CNT);
+ hisi_sas_bist_test_restore_v3_hw(hisi_hba);
+ }
+
+ return 0;
+}
+
static struct scsi_host_template sht_v3_hw = {
.name = DRV_NAME,
.module = THIS_MODULE,
@@ -2935,7 +3074,6 @@ static struct scsi_host_template sht_v3_hw = {
static const struct hisi_sas_hw hisi_sas_v3_hw = {
.hw_init = hisi_sas_v3_init,
.setup_itct = setup_itct_v3_hw,
- .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
.clear_itct = clear_itct_v3_hw,
@@ -2944,9 +3082,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.prep_smp = prep_smp_v3_hw,
.prep_stp = prep_ata_v3_hw,
.prep_abort = prep_abort_v3_hw,
- .get_free_slot = get_free_slot_v3_hw,
.start_delivery = start_delivery_v3_hw,
- .slot_complete = slot_complete_v3_hw,
.phys_init = phys_init_v3_hw,
.phy_start = start_phy_v3_hw,
.phy_disable = disable_phy_v3_hw,
@@ -2959,10 +3095,14 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.get_events = phy_get_events_v3_hw,
.write_gpio = write_gpio_v3_hw,
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
- .debugfs_reg_global = &debugfs_global_reg,
+ .debugfs_reg_array[DEBUGFS_GLOBAL] = &debugfs_global_reg,
+ .debugfs_reg_array[DEBUGFS_AXI] = &debugfs_axi_reg,
+ .debugfs_reg_array[DEBUGFS_RAS] = &debugfs_ras_reg,
.debugfs_reg_port = &debugfs_port_reg,
.snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
.snapshot_restore = debugfs_snapshot_restore_v3_hw,
+ .read_iost_itct_cache = read_iost_itct_cache_v3_hw,
+ .set_bist = debugfs_set_bist_v3_hw,
};
static struct Scsi_Host *
@@ -2993,8 +3133,6 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
else
hisi_hba->prot_mask = prot_mask;
- timer_setup(&hisi_hba->timer, NULL, 0);
-
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
@@ -3076,17 +3214,14 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_cmd_len = 16;
- shost->can_queue = hisi_hba->hw->max_command_entries -
- HISI_SAS_RESERVED_IPTT_CNT;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
- HISI_SAS_RESERVED_IPTT_CNT;
+ shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
+ shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
sha->sas_ha_name = DRV_NAME;
sha->dev = dev;
sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
- sha->core.shost = hisi_hba->shost;
for (i = 0; i < hisi_hba->n_phy; i++) {
sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
@@ -3273,15 +3408,21 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
rc = pci_enable_device(pdev);
- if (rc)
+ if (rc) {
dev_err(dev, "enable device failed during resume (%d)\n", rc);
+ return rc;
+ }
pci_set_master(pdev);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
sas_prep_resume_ha(sha);
- init_reg_v3_hw(hisi_hba);
+ rc = hw_init_v3_hw(hisi_hba);
+ if (rc) {
+ scsi_remove_host(shost);
+ pci_disable_device(pdev);
+ return rc;
+ }
hisi_hba->hw->phys_init(hisi_hba);
sas_resume_ha(sha);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
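debugfs_set_bist_v3_hw() above is a sequence of read-modify-write cycles on SAS_PHY_BIST_CTRL: clear the mode and enable fields, program the code mode, loopback mode, and test bit, settle, then raise the RX/TX enables separately. A tiny sketch of the field-update helper implied by that pattern; register access is simulated and the field encodings in main() are illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg; /* stands in for one PORT_BASE-relative register */

static uint32_t phy_read32(void) { return fake_reg; }
static void phy_write32(uint32_t val) { fake_reg = val; }

/* Read-modify-write one field, as the BIST setup above does repeatedly */
static void update_bits(uint32_t mask, unsigned int off, uint32_t val)
{
	uint32_t v = phy_read32();

	v &= ~mask;
	v |= val << off;
	phy_write32(v);
}

int main(void)
{
	update_bits(0xf << 0, 0, 2);	/* e.g. code mode field: PRBS31 */
	update_bits(0x3 << 14, 14, 1);	/* e.g. loop mode: digital maps to 1 */
	printf("reg = 0x%x\n", fake_reg);
	return 0;
}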
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 8cdbac076a1b..df897df5cafe 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1830,6 +1830,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
(bsg_request->rqst_data.h_els.port_id[1] << 8) |
bsg_request->rqst_data.h_els.port_id[2];
+ /* fall through */
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
@@ -1838,6 +1839,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
bsg_request->rqst_data.h_ct.port_id[2];
+ /* fall through */
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
@@ -4020,6 +4022,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
return;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_host_init(vhost);
+ /* fall through */
case IBMVFC_MAD_DRIVER_FAILED:
ibmvfc_free_event(evt);
return;
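The three ibmvfc hunks above add /* fall through */ markers so that GCC's -Wimplicit-fallthrough accepts these intentionally cascading cases. A minimal illustration of the idiom; the event values and flags are made up for the example:

#include <stdio.h>

/* Compile with -Wextra or -Wimplicit-fallthrough: the comment (or, in
 * later kernels, the fallthrough; pseudo-keyword) silences the warning
 * only for deliberate cascades.
 */
static int classify(int event)
{
	int flags = 0;

	switch (event) {
	case 2:
		flags |= 0x2;	/* extra work for case 2 ... */
		/* fall through */
	case 1:
		flags |= 0x1;	/* ... plus everything case 1 does */
		break;
	default:
		break;
	}
	return flags;
}

int main(void)
{
	printf("%d %d\n", classify(1), classify(2)); /* prints: 1 3 */
	return 0;
}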
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 7f9535392a93..a929fe76102b 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1581,6 +1581,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
case H_PERMISSION:
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
+ /* Fall through */
default:
dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
rc);
@@ -2492,8 +2493,10 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
break;
case H_CLOSED:
vscsi->flags |= CLIENT_FAILED;
+ /* Fall through */
case H_DROPPED:
vscsi->flags |= RESPONSE_Q_DOWN;
+ /* Fall through */
case H_REMOTE_PARM:
dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
rc);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index bade2e025ecf..691acbdcc46d 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -51,6 +51,8 @@ struct lpfc_sli2_slim;
cmnd for menlo needs nearly twice as for firmware
downloads using bsg */
+#define LPFC_DEFAULT_XPSGL_SIZE 256
+#define LPFC_MAX_SG_TABLESIZE 0xffff
#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
@@ -732,14 +734,13 @@ struct lpfc_hba {
#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
-#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */
+#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
* capability
*/
-#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
@@ -795,10 +796,12 @@ struct lpfc_hba {
uint8_t mds_diags_support;
uint8_t bbcredit_support;
uint8_t enab_exp_wqcq_pages;
+ u8 nsler; /* Firmware supports FC-NVMe-2 SLER */
/* HBA Config Parameters */
uint32_t cfg_ack0;
uint32_t cfg_xri_rebalancing;
+ uint32_t cfg_xpsgl;
uint32_t cfg_enable_npiv;
uint32_t cfg_enable_rrq;
uint32_t cfg_topology;
@@ -905,6 +908,7 @@ struct lpfc_hba {
wait_queue_head_t work_waitq;
struct task_struct *worker_thread;
unsigned long data_flags;
+ uint32_t border_sge_num;
uint32_t hbq_in_use; /* HBQs in use flag */
uint32_t hbq_count; /* Count of configured HBQs */
@@ -987,6 +991,7 @@ struct lpfc_hba {
struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
struct dma_pool *txrdy_payload_pool;
+ struct dma_pool *lpfc_cmd_rsp_buf_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@@ -1034,8 +1039,6 @@ struct lpfc_hba {
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpHostSlim;
struct dentry *debug_dumpHBASlim;
- struct dentry *debug_dumpData; /* BlockGuard BPL */
- struct dentry *debug_dumpDif; /* BlockGuard BPL */
struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d65558619ab0..25aa7a53d255 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -841,7 +841,8 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
lpfc_vpd_t *vp = &phba->vpd;
lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
- return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
+ return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
+ vp->rev.smRev, vp->rev.smFwRev);
}
/**
@@ -3682,8 +3683,8 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
if (rport)
remoteport = rport->remoteport;
spin_unlock(&vport->phba->hbalock);
- if (remoteport)
- nvme_fc_set_remoteport_devloss(rport->remoteport,
+ if (rport && remoteport)
+ nvme_fc_set_remoteport_devloss(remoteport,
vport->cfg_devloss_tmo);
#endif
}
@@ -5467,15 +5468,12 @@ LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
* lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
* For the Initiator (I), enabling this parameter means that an NVMET
* PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
- * processed by the initiator for subsequent NVME FCP IO. For the target
- * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
- * driver parameter as the target function's first burst size returned to the
- * initiator in the target's NVME PRLI response. Parameter supported on physical
- * port only - no NPIV support.
+ * processed by the initiator for subsequent NVME FCP IO.
+ * Currently, this feature is not supported on the NVME target
* Value range is [0,1]. Default value is 0 (disabled).
*/
LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
- "Enable First Burst feature on I and T functions.");
+ "Enable First Burst feature for NVME Initiator.");
/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
@@ -5927,7 +5925,7 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
* 1 = MDS Diagnostics enabled
* Value range is [0,1]. Default value is 0.
*/
-LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
/*
* lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
@@ -6859,10 +6857,31 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
static void
lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
+ struct lpfc_rport_data *rdata = rport->dd_data;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_nvme_rport *nrport = NULL;
+#endif
+
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
+
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ dev_info(&rport->dev, "Cannot find remote node to "
+ "set rport dev loss tmo, port_id x%x\n",
+ rport->port_id);
+ return;
+ }
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ nrport = lpfc_ndlp_get_nrport(ndlp);
+
+ if (nrport && nrport->remoteport)
+ nvme_fc_set_remoteport_devloss(nrport->remoteport,
+ rport->dev_loss_tmo);
+#endif
}
/**
@@ -7059,6 +7078,21 @@ struct fc_function_template lpfc_vport_transport_functions = {
};
/**
+ * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
+ * Mode
+ * @phba: lpfc_hba pointer.
+ **/
+static void
+lpfc_get_hba_function_mode(struct lpfc_hba *phba)
+{
+ /* If it's a SkyHawk FCoE adapter */
+ if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+ phba->hba_flag |= HBA_FCOE_MODE;
+ else
+ phba->hba_flag &= ~HBA_FCOE_MODE;
+}
+
+/**
* lpfc_get_cfgparam - Used during probe_one to init the adapter structure
* @phba: lpfc_hba pointer.
**/
@@ -7114,8 +7148,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
else
phba->cfg_poll = lpfc_poll;
- if (phba->cfg_enable_bg)
+ /* Get the function mode */
+ lpfc_get_hba_function_mode(phba);
+
+ /* BlockGuard allowed for FC only. */
+ if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "0581 BlockGuard feature not supported\n");
+ /* If set, clear the BlockGuard support param */
+ phba->cfg_enable_bg = 0;
+ } else if (phba->cfg_enable_bg) {
phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+ }
lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
@@ -7175,16 +7219,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
-
- /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
- * accommodate 512K and 1M IOs in a single nvme buf and supply
- * enough NVME LS iocb buffers for larger connectivity counts.
- */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- phba->cfg_iocb_cnt = 5;
- }
-
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index b7216d694bff..39a736b887b1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1040,7 +1040,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (!dmabuf) {
lpfc_printf_log(phba, KERN_ERR,
LOG_LIBDFC, "2616 No dmabuf "
- "found for iocbq 0x%p\n",
+ "found for iocbq x%px\n",
iocbq);
kfree(evt_dat->data);
kfree(evt_dat);
@@ -1276,9 +1276,7 @@ lpfc_bsg_hba_set_event(struct bsg_job *job)
return 0; /* call job done later */
job_error:
- if (dd_data != NULL)
- kfree(dd_data);
-
+ kfree(dd_data);
job->dd_data = NULL;
return rc;
}
@@ -1571,7 +1569,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
- ctiocb->iocb_cmpl = NULL;
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
ctiocb->context1 = dd_data;
@@ -5451,7 +5448,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5530,8 +5529,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5591,7 +5591,9 @@ ras_job_error:
bsg_reply->result = rc;
/* complete the job back to userspace */
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5673,7 +5675,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
ras_job_error:
bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
@@ -5744,8 +5748,9 @@ lpfc_get_trunk_info(struct bsg_job *job)
phba->sli4_hba.link_state.logical_speed / 1000;
job_error:
bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
+ if (!rc)
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 68e9f96242d3..b2ad8c750486 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -326,7 +326,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
-void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_dmabuf *);
struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -433,16 +433,6 @@ int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
uint16_t *, uint16_t *);
-/* externs BlockGuard */
-extern char *_dump_buf_data;
-extern unsigned long _dump_buf_data_order;
-extern char *_dump_buf_dif;
-extern unsigned long _dump_buf_dif_order;
-extern spinlock_t _dump_buf_lock;
-extern int _dump_buf_done;
-extern spinlock_t pgcnt_lock;
-extern unsigned int pgcnt;
-
/* Interface exported by fabric iocb scheduler */
void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
void lpfc_fabric_abort_hba(struct lpfc_hba *);
@@ -595,6 +585,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
+void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index ec72c39997d2..25e86706e207 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -462,6 +462,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
struct lpfc_nodelist *ndlp;
if ((vport->port_type != LPFC_NPIV_PORT) ||
+ (fc4_type == FC_TYPE_FCP) ||
!(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
ndlp = lpfc_setup_disc_node(vport, Did);
@@ -480,10 +481,20 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0238 Process x%06x NameServer Rsp "
- "Data: x%x x%x x%x x%x\n", Did,
+ "Data: x%x x%x x%x x%x x%x\n", Did,
ndlp->nlp_flag, ndlp->nlp_fc4_type,
- vport->fc_flag,
+ ndlp->nlp_state, vport->fc_flag,
vport->fc_rscn_id_cnt);
+
+ /* If the ndlp needs to be discovered and its prior
+ * state hit devloss, change the state to allow
+ * rediscovery.
+ */
+ if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
+ ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
@@ -491,9 +502,9 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0239 Skip x%06x NameServer Rsp "
- "Data: x%x x%x\n", Did,
- vport->fc_flag,
- vport->fc_rscn_id_cnt);
+ "Data: x%x x%x %p\n",
+ Did, vport->fc_flag,
+ vport->fc_rscn_id_cnt, ndlp);
}
} else {
if (!(vport->fc_flag & FC_RSCN_MODE) ||
@@ -751,9 +762,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0208 NameServer Rsp Data: x%x x%x\n",
+ "0208 NameServer Rsp Data: x%x x%x "
+ "sz x%x\n",
vport->fc_flag,
- CTreq->un.gid.Fc4Type);
+ CTreq->un.gid.Fc4Type,
+ irsp->un.genreq64.bdl.bdeSize);
lpfc_ns_rsp(vport,
outp,
@@ -814,6 +827,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
vport->gidft_inp--;
}
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "4216 GID_FT cmpl inp %d disc %d\n",
+ vport->gidft_inp, vport->num_disc_nodes);
+
/* Link up / RSCN discovery */
if ((vport->num_disc_nodes == 0) &&
(vport->gidft_inp == 0)) {
@@ -1209,14 +1227,34 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "3064 Setting ndlp %p, DID x%06x with "
- "FC4 x%08x, Data: x%08x x%08x\n",
+ "3064 Setting ndlp x%px, DID x%06x "
+ "with FC4 x%08x, Data: x%08x x%08x "
+ "%d\n",
ndlp, did, ndlp->nlp_fc4_type,
- FC_TYPE_FCP, FC_TYPE_NVME);
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
-
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ FC_TYPE_FCP, FC_TYPE_NVME,
+ ndlp->nlp_state);
+
+ if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
+ ndlp->nlp_fc4_type) {
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
+ } else if (!ndlp->nlp_fc4_type) {
+ /* If fc4 type is still unknown, then LOGO */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "6443 Sending LOGO ndlp x%px,"
+ "DID x%06x with fc4_type: "
+ "x%08x, state: %d\n",
+ ndlp, did, ndlp->nlp_fc4_type,
+ ndlp->nlp_state);
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_NPR_NODE);
+ }
}
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -2515,7 +2553,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
hsp = (struct serv_parm *)&vport->fc_sparam;
- ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+ ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
(uint32_t) hsp->cmn.bbRcvSizeLsb;
ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
size = FOURBYTES + sizeof(uint32_t);
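
The new & 0x0F mask reflects that the buffer-to-buffer receive data field size is a 12-bit value: only the low nibble of bbRcvSizeMsb belongs to the size, while the upper bits of that byte carry other service-parameter flags. A small sketch of the corrected assembly:

#include <stdint.h>

/* Combine the masked MSB nibble and the LSB byte into the 12-bit size. */
static uint32_t bb_rcv_size(uint8_t msb, uint8_t lsb)
{
	return (((uint32_t)msb & 0x0F) << 8) | (uint32_t)lsb;
}

/* e.g. msb = 0x28, lsb = 0x00: without the mask this would report
 * 0x2800 (10240); with it, the true frame size 0x800 (2048).
 */
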
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 1ee857d9d165..8d34be60d379 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -361,7 +361,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
len += scnprintf(buf+len, size-len,
- "Buf%d: %p %06x\n", i,
+ "Buf%d: x%px %06x\n", i,
hbq_buf->dbuf.virt, hbq_buf->tag);
found = 1;
break;
@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
- spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
- spin_lock(&qp->abts_nvme_buf_list_lock);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
spin_lock(&qp->io_buf_list_get_lock);
spin_lock(&qp->io_buf_list_put_lock);
out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
qp->abts_nvme_io_bufs, out);
spin_unlock(&qp->io_buf_list_put_lock);
spin_unlock(&qp->io_buf_list_get_lock);
- spin_unlock(&qp->abts_nvme_buf_list_lock);
- spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
lpfc_debugfs_last_xripool++;
if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
continue;
pbl_pool = &multixri_pool->pbl_pool;
pvt_pool = &multixri_pool->pvt_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
- if (qp->nvme_wq)
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
scnprintf(tmp, sizeof(tmp),
"%03d: %4d %4d %4d %4d | %10d %10d ",
@@ -2166,89 +2162,6 @@ out:
return rc;
}
-static int
-lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
-{
- struct lpfc_debug *debug;
- int rc = -ENOMEM;
-
- if (!_dump_buf_data)
- return -EBUSY;
-
- debug = kmalloc(sizeof(*debug), GFP_KERNEL);
- if (!debug)
- goto out;
-
- /* Round to page boundary */
- pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
- __func__, _dump_buf_data);
- debug->buffer = _dump_buf_data;
- if (!debug->buffer) {
- kfree(debug);
- goto out;
- }
-
- debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
- file->private_data = debug;
-
- rc = 0;
-out:
- return rc;
-}
-
-static int
-lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
-{
- struct lpfc_debug *debug;
- int rc = -ENOMEM;
-
- if (!_dump_buf_dif)
- return -EBUSY;
-
- debug = kmalloc(sizeof(*debug), GFP_KERNEL);
- if (!debug)
- goto out;
-
- /* Round to page boundary */
- pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
- __func__, _dump_buf_dif, file);
- debug->buffer = _dump_buf_dif;
- if (!debug->buffer) {
- kfree(debug);
- goto out;
- }
-
- debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
- file->private_data = debug;
-
- rc = 0;
-out:
- return rc;
-}
-
-static ssize_t
-lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
-{
- /*
- * The Data/DIF buffers only save one failing IO
- * The write op is used as a reset mechanism after an IO has
- * already been saved to the next one can be saved
- */
- spin_lock(&_dump_buf_lock);
-
- memset((void *)_dump_buf_data, 0,
- ((1 << PAGE_SHIFT) << _dump_buf_data_order));
- memset((void *)_dump_buf_dif, 0,
- ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
-
- _dump_buf_done = 0;
-
- spin_unlock(&_dump_buf_lock);
-
- return nbytes;
-}
-
static ssize_t
lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
@@ -2461,17 +2374,6 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
return 0;
}
-static int
-lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
-{
- struct lpfc_debug *debug = file->private_data;
-
- debug->buffer = NULL;
- kfree(debug);
-
- return 0;
-}
-
/**
* lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics
* @file: The file pointer to read from.
@@ -3786,23 +3688,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
int qidx;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
+ qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp->assoc_qid != cq_id)
continue;
*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
if (*len >= max_cnt)
return 1;
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
- if (qp->assoc_qid != cq_id)
- continue;
- *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
- if (*len >= max_cnt)
- return 1;
- }
- }
return 0;
}
@@ -3868,9 +3760,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
struct lpfc_queue *qp;
int rc;
- qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
+ qp = phba->sli4_hba.hdwq[eqidx].io_cq;
- *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+ *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
/* Reset max counter */
qp->CQ_max_cqe = 0;
@@ -3878,28 +3770,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
if (*len >= max_cnt)
return 1;
- rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+ rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
max_cnt, qp->queue_id);
if (rc)
return 1;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
-
- *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
-
- /* Reset max counter */
- qp->CQ_max_cqe = 0;
-
- if (*len >= max_cnt)
- return 1;
-
- rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
- max_cnt, qp->queue_id);
- if (rc)
- return 1;
- }
-
if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
@@ -4348,7 +4223,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
for (qidx = 0; qidx < phba->cfg_hdw_queue;
qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
+ qp = phba->sli4_hba.hdwq[qidx].io_cq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -4360,22 +4235,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
}
}
}
- /* NVME complete queue */
- if (phba->sli4_hba.hdwq) {
- qidx = 0;
- do {
- qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
- if (qp && qp->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- qp, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = qp;
- goto pass_check;
- }
- } while (++qidx < phba->cfg_hdw_queue);
- }
goto error_out;
break;
case LPFC_IDIAG_MQ:
@@ -4419,20 +4278,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
if (phba->sli4_hba.hdwq) {
/* FCP/SCSI work queue */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
- if (qp && qp->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- qp, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = qp;
- goto pass_check;
- }
- }
- /* NVME work queue */
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
+ qp = phba->sli4_hba.hdwq[qidx].io_wq;
if (qp && qp->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
@@ -5508,26 +5354,6 @@ static const struct file_operations lpfc_debugfs_op_cpucheck = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_dumpData
-static const struct file_operations lpfc_debugfs_op_dumpData = {
- .owner = THIS_MODULE,
- .open = lpfc_debugfs_dumpData_open,
- .llseek = lpfc_debugfs_lseek,
- .read = lpfc_debugfs_read,
- .write = lpfc_debugfs_dumpDataDif_write,
- .release = lpfc_debugfs_dumpDataDif_release,
-};
-
-#undef lpfc_debugfs_op_dumpDif
-static const struct file_operations lpfc_debugfs_op_dumpDif = {
- .owner = THIS_MODULE,
- .open = lpfc_debugfs_dumpDif_open,
- .llseek = lpfc_debugfs_lseek,
- .read = lpfc_debugfs_read,
- .write = lpfc_debugfs_dumpDataDif_write,
- .release = lpfc_debugfs_dumpDataDif_release,
-};
-
#undef lpfc_debugfs_op_dif_err
static const struct file_operations lpfc_debugfs_op_dif_err = {
.owner = THIS_MODULE,
@@ -5924,20 +5750,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
} else
phba->debug_dumpHostSlim = NULL;
- /* Setup dumpData */
- snprintf(name, sizeof(name), "dumpData");
- phba->debug_dumpData =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpData);
-
- /* Setup dumpDif */
- snprintf(name, sizeof(name), "dumpDif");
- phba->debug_dumpDif =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpDif);
-
/* Setup DIF Error Injections */
snprintf(name, sizeof(name), "InjErrLBA");
phba->debug_InjErrLBA =
@@ -6315,12 +6127,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
phba->debug_dumpHostSlim = NULL;
- debugfs_remove(phba->debug_dumpData); /* dumpData */
- phba->debug_dumpData = NULL;
-
- debugfs_remove(phba->debug_dumpDif); /* dumpDif */
- phba->debug_dumpDif = NULL;
-
debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
phba->debug_InjErrLBA = NULL;
@@ -6442,12 +6248,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
- }
+ lpfc_debug_dump_wq(phba, DUMP_IO, idx);
lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba);
@@ -6459,12 +6260,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
- lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
- }
+ lpfc_debug_dump_cq(phba, DUMP_IO, idx);
/*
* Dump Event Queues (EQs)
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 34070874616d..20f2537af511 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -291,8 +291,7 @@ struct lpfc_idiag {
#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
enum {
- DUMP_FCP,
- DUMP_NVME,
+ DUMP_IO,
DUMP_MBX,
DUMP_ELS,
DUMP_NVMELS,
@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
struct lpfc_queue *wq;
char *qtypestr;
- if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
- qtypestr = "FCP";
- } else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
- qtypestr = "NVME";
+ if (qtype == DUMP_IO) {
+ wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+ qtypestr = "IO";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
qtypestr = "MBX";
@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
} else
return;
- if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+ if (qtype == DUMP_IO)
pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
qtypestr, wqidx, wq->queue_id);
else
@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
char *qtypestr;
int eqidx;
- /* fcp/nvme wq and cq are 1:1, thus same indexes */
+ /* io wq and cq are 1:1, thus same indexes */
eq = NULL;
- if (qtype == DUMP_FCP) {
- wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
- cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
- qtypestr = "FCP";
- } else if (qtype == DUMP_NVME) {
- wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
- cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
- qtypestr = "NVME";
+ if (qtype == DUMP_IO) {
+ wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+ cq = phba->sli4_hba.hdwq[wqidx].io_cq;
+ qtypestr = "IO";
} else if (qtype == DUMP_MBX) {
wq = phba->sli4_hba.mbx_wq;
cq = phba->sli4_hba.mbx_cq;
@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
eq = phba->sli4_hba.hdwq[0].hba_eq;
}
- if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+ if (qtype == DUMP_IO)
pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
"->EQ[Idx:%d|Qid:%d]:\n",
qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
int wq_idx;
for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
- if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
+ if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
break;
if (wq_idx < phba->cfg_hdw_queue) {
- pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
- return;
- }
-
- for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
- if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
- break;
- if (wq_idx < phba->cfg_hdw_queue) {
- pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
+ pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
return;
}
@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
int cq_idx;
for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
- if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
- break;
-
- if (cq_idx < phba->cfg_hdw_queue) {
- pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
- return;
- }
-
- for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
- if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
+ if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
break;
if (cq_idx < phba->cfg_hdw_queue) {
- pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
- lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
+ pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1c89c9f314fa..482e4a888dae 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -112,6 +112,8 @@ struct lpfc_nodelist {
uint8_t nlp_retry; /* used for ELS retries */
uint8_t nlp_fcp_info; /* class info, bits 0-3 */
#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
+ u8 nlp_nvme_info; /* NVME NSLER Support */
+#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */
uint16_t nlp_usg_map; /* ndlp management usage bitmap */
#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
@@ -157,6 +159,7 @@ struct lpfc_node_rrq {
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
+#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */
#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */
#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f12780f4cfbb..d5303994bfd6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1052,17 +1052,18 @@ stop_rr_fcf_flogi:
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0150 FLOGI failure Status:x%x/x%x "
+ "xri x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ cmdiocb->sli4_xritag, irsp->ulpTimeout);
+
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
- "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout);
-
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1207,6 +1208,39 @@ out:
}
/**
+ * lpfc_cmpl_els_link_down - Completion callback function for ELS command
+ * aborted during a link down
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ */
+static void
+lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ IOCB_t *irsp;
+ uint32_t *pcmd;
+ uint32_t cmd;
+
+ pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+ cmd = *pcmd;
+ irsp = &rspiocb->iocb;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+ "6445 ELS completes after LINK_DOWN: "
+ " Status %x/%x cmd x%x flg x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
+ cmdiocb->iocb_flag);
+
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
+ cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ atomic_dec(&phba->fabric_iocb_count);
+ }
+ lpfc_els_free_iocb(phba, cmdiocb);
+}
+
+/**
* lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
@@ -2107,7 +2141,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
!(vport->fc_flag & FC_OFFLINE_MODE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"4110 Issue PLOGI x%x deferred "
- "on NPort x%x rpi x%x Data: %p\n",
+ "on NPort x%x rpi x%x Data: x%px\n",
ndlp->nlp_defer_did, ndlp->nlp_DID,
ndlp->nlp_rpi, ndlp);
@@ -2401,6 +2435,10 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr_nvme = (struct lpfc_nvme_prli *)pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
+ if (phba->nsler) {
+ bf_set(prli_nsler, npr_nvme, 1);
+ bf_set(prli_conf, npr_nvme, 1);
+ }
/* Only initiators request first burst. */
if ((phba->cfg_nvme_enable_fb) &&
@@ -4203,7 +4241,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+ "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5634,16 +5672,16 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
if (vport->fc_flag & FC_FABRIC) {
memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
- sizeof(desc->port_names.wwnn));
+ sizeof(desc->port_names.wwnn));
memcpy(desc->port_names.wwpn, &vport->fabric_portname,
- sizeof(desc->port_names.wwpn));
+ sizeof(desc->port_names.wwpn));
} else { /* Point to Point */
memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
- sizeof(desc->port_names.wwnn));
+ sizeof(desc->port_names.wwnn));
- memcpy(desc->port_names.wwnn, &ndlp->nlp_portname,
- sizeof(desc->port_names.wwpn));
+ memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
+ sizeof(desc->port_names.wwpn));
}
desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -6327,7 +6365,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
continue;
}
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+ /* Check to see if we need to issue an NVME rescan on
+ * this target's remoteport.
+ */
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
lpfc_nvme_rescan_port(vport, ndlp);
lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -6441,7 +6483,11 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
*lp, vport->fc_flag, payload_len);
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+ /* Check to see if we need to issue an NVME rescan on
+ * this target's remoteport.
+ */
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+ ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
lpfc_nvme_rescan_port(vport, ndlp);
return 0;
}
@@ -7960,18 +8006,40 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ /* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
if (piocb->iocb_flag & LPFC_IO_LIBDFC)
continue;
if (piocb->vport != vport)
continue;
- list_add_tail(&piocb->dlist, &abort_list);
+
+ /* On the ELS ring we can have ELS_REQUESTs or
+ * GEN_REQUESTs waiting for a response.
+ */
+ cmd = &piocb->iocb;
+ if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ list_add_tail(&piocb->dlist, &abort_list);
+
+ /* If the link is down when flushing ELS commands
+ * the firmware will not complete them till after
+ * the link comes back up. This may confuse
+ * discovery for the new link up, so we need to
+ * change the compl routine to just clean up the iocb
+ * and avoid any retry logic.
+ */
+ if (phba->link_state == LPFC_LINK_DOWN)
+ piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+ }
+ if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+ list_add_tail(&piocb->dlist, &abort_list);
}
+
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
- /* Abort each iocb on the aborted list and remove the dlist links. */
+
+ /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irq(&phba->hbalock);
list_del_init(&piocb->dlist);
@@ -7987,6 +8055,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ /* No need to abort the txq list,
+ * just queue them up for lpfc_sli_cancel_iocbs
+ */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
cmd = &piocb->iocb;
@@ -8007,11 +8078,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_del_init(&piocb->list);
list_add_tail(&piocb->list, &abort_list);
}
+
+ /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
+ if (vport == phba->pport) {
+ list_for_each_entry_safe(piocb, tmp_iocb,
+ &phba->fabric_iocb_list, list) {
+ cmd = &piocb->iocb;
+ list_del_init(&piocb->list);
+ list_add_tail(&piocb->list, &abort_list);
+ }
+ }
+
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring->ring_lock);
spin_unlock_irq(&phba->hbalock);
- /* Cancell all the IOCBs from the completions list */
+ /* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &abort_list,
IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
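
Throughout this flush path the iocbs are first moved onto a private abort_list under the ring locks, and only cancelled after the locks are dropped, so completion handlers never run with hbalock or the ring lock held. A simplified sketch of the collect-then-cancel pattern with stand-in types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct pending_io {
	struct list_head list;
};

static void flush_pending(spinlock_t *lock, struct list_head *txq,
			  struct list_head *abort_list)
{
	struct pending_io *pos, *tmp;

	spin_lock_irq(lock);
	list_for_each_entry_safe(pos, tmp, txq, list)
		list_move_tail(&pos->list, abort_list);
	spin_unlock_irq(lock);

	/* Cancel/complete each entry here, lock-free, the way
	 * lpfc_sli_cancel_iocbs() walks abort_list above.
	 */
}
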
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 28ecaa7fc715..749286acdc17 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -118,6 +118,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
struct lpfc_work_evt *evtp;
int put_node;
int put_rport;
+ unsigned long iflags;
rdata = rport->dd_data;
ndlp = rdata->pnode;
@@ -132,7 +133,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
+ "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
/* Don't defer this if we are in the process of deleting the vport
@@ -170,22 +171,22 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
}
shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
/* We need to hold the node by incrementing the reference
* count until this queued work is done
*/
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (evtp->evt_arg1) {
evtp->evt = LPFC_EVT_DEV_LOSS;
list_add_tail(&evtp->evt_listp, &phba->work_list);
lpfc_worker_wake_up(phba);
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
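
The switch to spin_lock_irqsave() matters because dev_loss_tmo_callbk() can be entered from contexts where interrupts are already disabled; spin_unlock_irq() would unconditionally re-enable them and corrupt that state. A minimal sketch of the difference:

#include <linux/spinlock.h>

static void touch_shared_state(spinlock_t *lock, int *shared)
{
	unsigned long iflags;

	/* Records the caller's current IRQ state before disabling. */
	spin_lock_irqsave(lock, iflags);
	(*shared)++;
	/* Restores exactly that state, unlike spin_unlock_irq(), which
	 * always re-enables interrupts.
	 */
	spin_unlock_irqrestore(lock, iflags);
}
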
@@ -212,14 +213,15 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
int put_node;
int warn_on = 0;
int fcf_inuse = 0;
+ unsigned long iflags;
rport = ndlp->rport;
vport = ndlp->vport;
shost = lpfc_shost_from_vport(vport);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
if (!rport)
return fcf_inuse;
@@ -235,7 +237,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
+ "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
/*
@@ -903,6 +905,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->trunk_link.link1.state = 0;
phba->trunk_link.link2.state = 0;
phba->trunk_link.link3.state = 0;
+ phba->sli4_hba.link_state.logical_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
}
spin_lock_irq(shost->host_lock);
phba->pport->fc_flag &= ~FC_LBIT;
@@ -3115,8 +3119,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
int rc;
struct fcf_record *fcf_record;
uint32_t fc_flags = 0;
+ unsigned long iflags;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -3213,12 +3218,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
vport->fc_myDID = phba->fc_pref_DID;
fc_flags |= FC_LBIT;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
if (fc_flags) {
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
vport->fc_flag |= fc_flags;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
}
lpfc_linkup(phba);
@@ -3292,22 +3297,22 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
* The driver is expected to do FIP/FCF. Call the port
* and get the FCF Table.
*/
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->hba_flag & FCF_TS_INPROG) {
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
/* This is the initial FCF discovery scan */
phba->fcf.fcf_flag |= FCF_INIT_DISC;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
"2778 Start FCF table scan at linkup\n");
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
if (rc) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
/* Reset FCF roundrobin bmask for new discovery */
@@ -3318,7 +3323,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
out:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
- "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
+ "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
vport->port_state, sparam_mbox, cfglink_mbox);
lpfc_issue_clear_la(phba, vport);
return;
@@ -3366,6 +3371,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
uint8_t attn_type;
+ unsigned long iflags;
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
@@ -3387,12 +3393,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->alpa_map[0], mp->virt, 128);
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
if (bf_get(lpfc_mbx_read_top_pb, la))
vport->fc_flag |= FC_BYPASSED_MODE;
else
vport->fc_flag &= ~FC_BYPASSED_MODE;
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
if (phba->fc_eventTag <= la->eventTag) {
phba->fc_stat.LinkMultiEvent++;
@@ -3403,12 +3409,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->fc_eventTag = la->eventTag;
if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_mbx_read_top_mm, la))
phba->sli.sli_flag |= LPFC_MENLO_MAINT;
else
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
phba->link_events++;
@@ -3529,7 +3535,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->ctx_ndlp = NULL;
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4041,7 +4047,7 @@ out:
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
+ "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4160,7 +4166,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
fc_remote_port_rolechg(rport, rport_ids.roles);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "3183 rport register x%06x, rport %p role x%x\n",
+ "3183 rport register x%06x, rport x%px role x%x\n",
ndlp->nlp_DID, rport, rport_ids.roles);
if ((rport->scsi_target_id != -1) &&
@@ -4184,7 +4190,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3184 rport unregister x%06x, rport %p\n",
+ "3184 rport unregister x%06x, rport x%px\n",
ndlp->nlp_DID, rport);
fc_remote_port_delete(rport);
@@ -4196,8 +4202,9 @@ static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
switch (state) {
case NLP_STE_UNUSED_NODE:
vport->fc_unused_cnt += count;
@@ -4227,7 +4234,7 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
vport->fc_npr_cnt += count;
break;
}
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
}
static void
@@ -4480,9 +4487,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return NULL;
if (phba->sli_rev == LPFC_SLI_REV4) {
- rpi = lpfc_sli4_alloc_rpi(vport->phba);
- if (rpi == LPFC_RPI_ALLOC_ERROR)
+ if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
+ rpi = lpfc_sli4_alloc_rpi(vport->phba);
+ else
+ rpi = ndlp->nlp_rpi;
+
+ if (rpi == LPFC_RPI_ALLOC_ERROR) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "0359 %s: ndlp:x%px "
+ "usgmap:x%x refcnt:%d FAILED RPI "
+ " ALLOC\n",
+ __func__,
+ (void *)ndlp, ndlp->nlp_usg_map,
+ kref_read(&ndlp->kref));
return NULL;
+ }
}
spin_lock_irqsave(&phba->ndlp_lock, flags);
@@ -4490,9 +4509,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (NLP_CHK_FREE_REQ(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0277 lpfc_enable_node: ndlp:x%p "
+ "0277 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
goto free_rpi;
}
@@ -4500,9 +4519,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (NLP_CHK_NODE_ACT(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0278 lpfc_enable_node: ndlp:x%p "
+ "0278 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
goto free_rpi;
}
@@ -4532,7 +4551,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0008 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4541,6 +4560,14 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (state != NLP_STE_UNUSED_NODE)
lpfc_nlp_set_state(vport, ndlp, state);
+ else
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0013 rpi:%x DID:%x flg:%x refcnt:%d "
+ "map:%x x%px STATE=UNUSED\n",
+ ndlp->nlp_rpi, ndlp->nlp_DID,
+ ndlp->nlp_flag,
+ kref_read(&ndlp->kref),
+ ndlp->nlp_usg_map, ndlp);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node enable: did:x%x",
@@ -4797,7 +4824,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
(ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1434 UNREG cmpl deferred logo x%x "
- "on NPort x%x Data: x%x %p\n",
+ "on NPort x%x Data: x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did, ndlp);
@@ -4805,6 +4832,10 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ }
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
@@ -4843,7 +4874,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1436 unreg_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
- "Data: %p\n",
+ "Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp);
@@ -4893,7 +4924,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1433 unreg_rpi UNREG x%x on "
- "NPort x%x deferred flg x%x Data:%p\n",
+ "NPort x%x deferred flg x%x "
+ "Data:x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp);
@@ -5034,16 +5066,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
if (NLP_CHK_FREE_REQ(ndlp)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0280 lpfc_cleanup_node: ndlp:x%p "
+ "0280 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
lpfc_dequeue_node(vport, ndlp);
} else {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0281 lpfc_cleanup_node: ndlp:x%p "
+ "0281 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
lpfc_disable_node(vport, ndlp);
}
@@ -5104,6 +5136,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ ndlp->nlp_flag |= NLP_RELEASE_RPI;
lpfc_unreg_rpi(vport, ndlp);
return 0;
@@ -5132,7 +5166,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* allocated by the firmware.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ "0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5168,8 +5202,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* for registered rport so need to cleanup rport
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "0940 removed node x%p DID x%x "
- " rport not null %p\n",
+ "0940 removed node x%px DID x%x "
+ " rport not null x%px\n",
ndlp, ndlp->nlp_DID, ndlp->rport);
rport = ndlp->rport;
rdata = rport->dd_data;
@@ -5243,15 +5277,15 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (lpfc_matchdid(vport, ndlp, did)) {
- data1 = (((uint32_t) ndlp->nlp_state << 24) |
- ((uint32_t) ndlp->nlp_xri << 16) |
- ((uint32_t) ndlp->nlp_type << 8) |
- ((uint32_t) ndlp->nlp_rpi & 0xff));
+ data1 = (((uint32_t)ndlp->nlp_state << 24) |
+ ((uint32_t)ndlp->nlp_xri << 16) |
+ ((uint32_t)ndlp->nlp_type << 8) |
+ ((uint32_t)ndlp->nlp_usg_map & 0xff));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
- "Data: x%p x%x x%x x%x %p\n",
+ "Data: x%px x%x x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag, data1,
+ ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
return ndlp;
}
@@ -5296,7 +5330,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
spin_unlock_irqrestore(shost->host_lock, iflags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"2025 FIND node DID "
- "Data: x%p x%x x%x x%x %p\n",
+ "Data: x%px x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1,
ndlp->active_rrqs_xri_bitmap);
@@ -5336,8 +5370,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (vport->phba->nvmet_support)
return NULL;
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
- if (!ndlp)
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
+ "0014 Could not enable ndlp\n");
return NULL;
+ }
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5960,7 +5997,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
+ "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -6014,8 +6051,8 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3185 FIND node filter %p DID "
- "ndlp %p did x%x flg x%x st x%x "
+ "3185 FIND node filter %ps DID "
+ "ndlp x%px did x%x flg x%x st x%x "
"xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state,
@@ -6025,7 +6062,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
}
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "3186 FIND node filter %p NOT FOUND.\n", filter);
+ "3186 FIND node filter %ps NOT FOUND.\n", filter);
return NULL;
}
@@ -6065,10 +6102,11 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long flags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, flags);
ndlp = __lpfc_findnode_rpi(vport, rpi);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, flags);
return ndlp;
}
@@ -6149,7 +6187,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0007 rpi:%x DID:%x flg:%x refcnt:%d "
- "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
+ "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -6187,8 +6225,9 @@ lpfc_nlp_release(struct kref *kref)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
- "0279 lpfc_nlp_release: ndlp:x%p did %x "
+ "0279 %s: ndlp:x%px did %x "
"usgmap:x%x refcnt:%d rpi:%x\n",
+ __func__,
(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
kref_read(&ndlp->kref), ndlp->nlp_rpi);
@@ -6200,8 +6239,6 @@ lpfc_nlp_release(struct kref *kref)
spin_lock_irqsave(&phba->ndlp_lock, flags);
NLP_CLR_NODE_ACT(ndlp);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -6237,9 +6274,9 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0276 lpfc_nlp_get: ndlp:x%p "
+ "0276 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return NULL;
} else
@@ -6265,9 +6302,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return 1;
lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node put: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
+ "node put: did:x%x flg:x%x refcnt:x%x",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ kref_read(&ndlp->kref));
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
/* Check the ndlp memory free acknowledge flag to avoid the
@@ -6277,9 +6314,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
if (NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0274 lpfc_nlp_put: ndlp:x%p "
+ "0274 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return 1;
}
@@ -6290,9 +6327,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
if (NLP_CHK_IACT_REQ(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
- "0275 lpfc_nlp_put: ndlp:x%p "
+ "0275 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
- (void *)ndlp, ndlp->nlp_usg_map,
+ __func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 5b439a6dcde1..436cdc8c5ef4 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -843,7 +843,7 @@ typedef struct _ADISC { /* Structure is in Big Endian format */
struct lpfc_name portName;
struct lpfc_name nodeName;
uint32_t DID;
-} ADISC;
+} __packed ADISC;
typedef struct _FARP { /* Structure is in Big Endian format */
uint32_t Mflags:8;
@@ -873,7 +873,7 @@ typedef struct _FAN { /* Structure is in Big Endian format */
uint32_t Fdid;
struct lpfc_name FportName;
struct lpfc_name FnodeName;
-} FAN;
+} __packed FAN;
typedef struct _SCR { /* Structure is in Big Endian format */
uint8_t resvd1;
@@ -917,7 +917,7 @@ typedef struct _RNID { /* Structure is in Big Endian format */
union {
RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
} un;
-} RNID;
+} __packed RNID;
typedef struct _RPS { /* Structure is in Big Endian format */
union {
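
Marking these ELS payloads __packed keeps the in-memory layout byte-for-byte identical to the big-endian wire format by forbidding compiler-inserted alignment padding. An illustrative example with hypothetical field names:

#include <linux/types.h>

struct example_wire_payload {
	u8	cmd;
	u8	rsvd[3];
	__be64	port_name;	/* big-endian on the wire */
	__be32	did;
} __packed;

/* With __packed, sizeof() is 16 and port_name sits at offset 4. Without
 * it, a compiler may place port_name at offset 8 and pad the struct to
 * 24 bytes, shifting every field relative to the received frame.
 */
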
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 77f9a55a3f54..bd533475c86a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -2050,6 +2050,23 @@ struct sli4_sge { /* SLI-4 */
uint32_t sge_len;
};
+struct sli4_hybrid_sgl {
+ struct list_head list_node;
+ struct sli4_sge *dma_sgl;
+ dma_addr_t dma_phys_sgl;
+};
+
+struct fcp_cmd_rsp_buf {
+ struct list_head list_node;
+
+ /* for storing cmd/rsp dma alloc'ed virt_addr */
+ struct fcp_cmnd *fcp_cmnd;
+ struct fcp_rsp *fcp_rsp;
+
+ /* for storing this cmd/rsp's dma mapped phys addr from per CPU pool */
+ dma_addr_t fcp_cmd_rsp_dma_handle;
+};
+
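
These two structures let extra SGL chunks and FCP cmd/rsp buffers be pooled per hardware queue, with list_node linking free entries. A hypothetical sketch of handing out such an entry, falling back to a DMA pool when the per-queue free list is empty; the driver's real helpers (the lpfc_put_sgl_per_hdwq()/lpfc_put_cmd_rsp_buf_per_hdwq() counterparts visible in lpfc_io_free() later in this series) are more involved:

#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/slab.h>

struct buf_entry {
	struct list_head list_node;
	void *cpu_addr;
	dma_addr_t dma_handle;
};

static struct buf_entry *get_buf(struct list_head *free_list,
				 struct dma_pool *pool)
{
	struct buf_entry *buf;

	/* Reuse a previously allocated entry when one is available. */
	if (!list_empty(free_list)) {
		buf = list_first_entry(free_list, struct buf_entry,
				       list_node);
		list_del_init(&buf->list_node);
		return buf;
	}

	/* Otherwise allocate the tracking node and its DMA buffer. */
	buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
	if (!buf)
		return NULL;
	buf->cpu_addr = dma_pool_alloc(pool, GFP_ATOMIC, &buf->dma_handle);
	if (!buf->cpu_addr) {
		kfree(buf);
		return NULL;
	}
	return buf;
}
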
struct sli4_sge_diseed { /* SLI-4 */
uint32_t ref_tag;
uint32_t ref_tag_tran;
@@ -3449,6 +3466,9 @@ struct lpfc_sli4_parameters {
#define cfg_xib_SHIFT 4
#define cfg_xib_MASK 0x00000001
#define cfg_xib_WORD word19
+#define cfg_xpsgl_SHIFT 6
+#define cfg_xpsgl_MASK 0x00000001
+#define cfg_xpsgl_WORD word19
#define cfg_eqdr_SHIFT 8
#define cfg_eqdr_MASK 0x00000001
#define cfg_eqdr_WORD word19
@@ -3460,6 +3480,10 @@ struct lpfc_sli4_parameters {
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19
+#define cfg_nsler_SHIFT 12
+#define cfg_nsler_MASK 0x00000001
+#define cfg_nsler_WORD word19
+
uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
#define cfg_max_tow_xri_MASK 0x0000ffff
@@ -4314,6 +4338,12 @@ struct wqe_common {
#define wqe_rcvoxid_SHIFT 16
#define wqe_rcvoxid_MASK 0x0000FFFF
#define wqe_rcvoxid_WORD word9
+#define wqe_sof_SHIFT 24
+#define wqe_sof_MASK 0x000000FF
+#define wqe_sof_WORD word9
+#define wqe_eof_SHIFT 16
+#define wqe_eof_MASK 0x000000FF
+#define wqe_eof_WORD word9
uint32_t word10;
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
@@ -4595,6 +4625,7 @@ struct lpfc_nvme_prli {
#define prli_type_code_WORD word1
uint32_t word_rsvd2;
uint32_t word_rsvd3;
+
uint32_t word4;
#define prli_fba_SHIFT 0
#define prli_fba_MASK 0x00000001
@@ -4611,6 +4642,9 @@ struct lpfc_nvme_prli {
#define prli_conf_SHIFT 7
#define prli_conf_MASK 0x00000001
#define prli_conf_WORD word4
+#define prli_nsler_SHIFT 8
+#define prli_nsler_MASK 0x00000001
+#define prli_nsler_WORD word4
uint32_t word5;
#define prli_fb_sz_SHIFT 0
#define prli_fb_sz_MASK 0x0000ffff
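
Each new bit here (cfg_xpsgl, cfg_nsler, prli_nsler) follows the driver's SHIFT/MASK/WORD triplet convention consumed by the bf_set()/bf_get() macros, which token-paste those suffixes onto the field name. One instance spelled out by hand for the prli_nsler bit (shift 8, mask 1, in word4):

#include <stdint.h>

#define PRLI_NSLER_SHIFT 8
#define PRLI_NSLER_MASK  0x00000001

/* Equivalent of bf_set(prli_nsler, npr_nvme, val) on the raw word. */
static inline void set_nsler(uint32_t *word4, uint32_t val)
{
	*word4 = (*word4 & ~(PRLI_NSLER_MASK << PRLI_NSLER_SHIFT)) |
		 ((val & PRLI_NSLER_MASK) << PRLI_NSLER_SHIFT);
}

/* Equivalent of bf_get(prli_nsler, npr_nvme). */
static inline uint32_t get_nsler(uint32_t word4)
{
	return (word4 >> PRLI_NSLER_SHIFT) & PRLI_NSLER_MASK;
}
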
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 1ac98becb5ba..e91377a4cafe 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -39,6 +39,7 @@
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
+#include <linux/crash_dump.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -65,12 +66,6 @@
#include "lpfc_version.h"
#include "lpfc_ids.h"
-char *_dump_buf_data;
-unsigned long _dump_buf_data_order;
-char *_dump_buf_dif;
-unsigned long _dump_buf_dif_order;
-spinlock_t _dump_buf_lock;
-
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;
@@ -1081,8 +1076,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- spin_lock(&qp->abts_scsi_buf_list_lock);
- list_splice_init(&qp->lpfc_abts_scsi_buf_list,
+ spin_lock(&qp->abts_io_buf_list_lock);
+ list_splice_init(&qp->lpfc_abts_io_buf_list,
&aborts);
list_for_each_entry_safe(psb, psb_next, &aborts, list) {
@@ -1093,29 +1088,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_lock(&qp->io_buf_list_put_lock);
list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
qp->put_io_bufs += qp->abts_scsi_io_bufs;
+ qp->put_io_bufs += qp->abts_nvme_io_bufs;
qp->abts_scsi_io_bufs = 0;
+ qp->abts_nvme_io_bufs = 0;
spin_unlock(&qp->io_buf_list_put_lock);
- spin_unlock(&qp->abts_scsi_buf_list_lock);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- spin_lock(&qp->abts_nvme_buf_list_lock);
- list_splice_init(&qp->lpfc_abts_nvme_buf_list,
- &nvme_aborts);
- list_for_each_entry_safe(psb, psb_next, &nvme_aborts,
- list) {
- psb->pCmd = NULL;
- psb->status = IOSTAT_SUCCESS;
- cnt++;
- }
- spin_lock(&qp->io_buf_list_put_lock);
- qp->put_io_bufs += qp->abts_nvme_io_bufs;
- qp->abts_nvme_io_bufs = 0;
- list_splice_init(&nvme_aborts,
- &qp->lpfc_io_buf_list_put);
- spin_unlock(&qp->io_buf_list_put_lock);
- spin_unlock(&qp->abts_nvme_buf_list_lock);
-
- }
+ spin_unlock(&qp->abts_io_buf_list_lock);
}
spin_unlock_irq(&phba->hbalock);
@@ -1261,6 +1238,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
unsigned char *eqcnt = NULL;
uint32_t usdelay;
int i;
+ bool update = false;
if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
return;
@@ -1274,20 +1252,29 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
if (!eqcnt)
goto requeue;
- /* Loop thru all IRQ vectors */
- for (i = 0; i < phba->cfg_irq_chann; i++) {
- /* Get the EQ corresponding to the IRQ vector */
- eq = phba->sli4_hba.hba_eq_hdl[i].eq;
- if (eq && eqcnt[eq->last_cpu] < 2)
- eqcnt[eq->last_cpu]++;
- continue;
- }
+ if (phba->cfg_irq_chann > 1) {
+ /* Loop thru all IRQ vectors */
+ for (i = 0; i < phba->cfg_irq_chann; i++) {
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
+ if (!eq)
+ continue;
+ if (eq->q_mode) {
+ update = true;
+ break;
+ }
+ if (eqcnt[eq->last_cpu] < 2)
+ eqcnt[eq->last_cpu]++;
+ }
+ } else
+ update = true;
for_each_present_cpu(i) {
- if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2)
- continue;
-
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
+ if (!update && eqcnt[i] < 2) {
+ eqi->icnt = 0;
+ continue;
+ }
usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) *
LPFC_EQ_DELAY_STEP;
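
With this rework the EQ coalescing delay scales linearly with the interrupt count seen in the sampling window: every LPFC_IMAX_THRESHOLD interrupts add one LPFC_EQ_DELAY_STEP microseconds. The arithmetic, with illustrative constant values (the driver's actual values may differ):

#define LPFC_IMAX_THRESHOLD	1000	/* illustrative */
#define LPFC_EQ_DELAY_STEP	15	/* microseconds, illustrative */

static unsigned int eq_usdelay(unsigned int icnt)
{
	/* e.g. icnt = 4500 -> 4 steps -> 60 us of EQ delay */
	return (icnt / LPFC_IMAX_THRESHOLD) * LPFC_EQ_DELAY_STEP;
}
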
@@ -1535,6 +1522,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ lpfc_sli_flush_io_rings(phba);
lpfc_offline(phba);
lpfc_hba_down_post(phba);
lpfc_unblock_mgmt_io(phba);
@@ -1796,6 +1784,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
"2887 Reset Needed: Attempting Port "
"Recovery...\n");
lpfc_offline_prep(phba, mbx_action);
+ lpfc_sli_flush_io_rings(phba);
lpfc_offline(phba);
/* release interrupt for possible resource change */
lpfc_sli4_disable_intr(phba);
@@ -1915,7 +1904,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"7624 Firmware not ready: Failing UE recovery,"
" waited %dSec", i);
- lpfc_sli4_offline_eratt(phba);
+ phba->link_state = LPFC_HBA_ERROR;
break;
case LPFC_SLI_INTF_IF_TYPE_2:
@@ -1989,9 +1978,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
/* fall through for not able to recover */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3152 Unrecoverable error, bring the port "
- "offline\n");
- lpfc_sli4_offline_eratt(phba);
+ "3152 Unrecoverable error\n");
+ phba->link_state = LPFC_HBA_ERROR;
break;
case LPFC_SLI_INTF_IF_TYPE_1:
default:
@@ -2863,7 +2851,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
LOG_NODE,
- "0282 did:x%x ndlp:x%p "
+ "0282 did:x%x ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
ndlp->nlp_DID, (void *)ndlp,
ndlp->nlp_usg_map,
@@ -3067,7 +3055,7 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba)
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0009 rpi:%x DID:%x "
- "flg:%x map:%x %p\n", ndlp->nlp_rpi,
+ "flg:%x map:%x x%px\n", ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_usg_map, ndlp);
}
@@ -3252,12 +3240,8 @@ static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
lpfc_destroy_expedite_pool(phba);
- if (!(phba->pport->load_flag & FC_UNLOADING)) {
- lpfc_sli_flush_fcp_rings(phba);
-
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
- }
+ if (!(phba->pport->load_flag & FC_UNLOADING))
+ lpfc_sli_flush_io_rings(phba);
hwq_count = phba->cfg_hdw_queue;
@@ -3491,7 +3475,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_printf_vlog(ndlp->vport,
KERN_INFO, LOG_NODE,
"0011 lpfc_offline: "
- "ndlp:x%p did %x "
+ "ndlp:x%px did %x "
"usgmap:x%x rpi:%x\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_usg_map,
@@ -3636,6 +3620,9 @@ lpfc_io_free(struct lpfc_hba *phba)
qp->put_io_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
kfree(lpfc_ncmd);
qp->total_io_bufs--;
}
@@ -3649,6 +3636,9 @@ lpfc_io_free(struct lpfc_hba *phba)
qp->get_io_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
kfree(lpfc_ncmd);
qp->total_io_bufs--;
}
@@ -4097,18 +4087,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
LIST_HEAD(post_nblist);
LIST_HEAD(nvme_nblist);
- /* Sanity check to ensure our sizing is right for both SCSI and NVME */
- if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "6426 Common buffer size %zd exceeds %d\n",
- sizeof(struct lpfc_io_buf),
- LPFC_COMMON_IO_BUF_SZ);
- return 0;
- }
-
phba->sli4_hba.io_xri_cnt = 0;
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
- lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
+ lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
if (!lpfc_ncmd)
break;
/*
@@ -4124,22 +4105,30 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
break;
}
- /*
- * 4K Page alignment is CRITICAL to BlockGuard, double check
- * to be sure.
- */
- if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- (((unsigned long)(lpfc_ncmd->data) &
- (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "3369 Memory alignment err: addr=%lx\n",
- (unsigned long)lpfc_ncmd->data);
- dma_pool_free(phba->lpfc_sg_dma_buf_pool,
- lpfc_ncmd->data, lpfc_ncmd->dma_handle);
- kfree(lpfc_ncmd);
- break;
+ if (phba->cfg_xpsgl && !phba->nvmet_support) {
+ INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
+ } else {
+ /*
+ * 4K Page alignment is CRITICAL to BlockGuard, double
+ * check to be sure.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+ (((unsigned long)(lpfc_ncmd->data) &
+ (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+ "3369 Memory alignment err: "
+ "addr=%lx\n",
+ (unsigned long)lpfc_ncmd->data);
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ break;
+ }
}
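Editor's note: the alignment test above applies only on the non-XPSGL path, where the SGL is embedded in the DMA buffer and the comment flags 4K alignment as critical to BlockGuard (T10-DIF). A minimal sketch of the predicate, assuming SLI4_PAGE_SIZE is a power of two (4096 in this driver); the helper name is hypothetical:

	/* true when buf sits on a SLI4 page boundary (low 12 bits clear) */
	static inline bool lpfc_buf_page_aligned(void *buf)
	{
		return ((unsigned long)buf & (SLI4_PAGE_SIZE - 1)) == 0;
	}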
+ INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
+
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
@@ -4318,7 +4307,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
- shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
+
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
+ else
+ shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
@@ -6336,6 +6329,24 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
return -ENOMEM;
+ phba->lpfc_sg_dma_buf_pool =
+ dma_pool_create("lpfc_sg_dma_buf_pool",
+ &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
+ BPL_ALIGN_SZ, 0);
+
+ if (!phba->lpfc_sg_dma_buf_pool)
+ goto fail_free_mem;
+
+ phba->lpfc_cmd_rsp_buf_pool =
+ dma_pool_create("lpfc_cmd_rsp_buf_pool",
+ &phba->pcidev->dev,
+ sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp),
+ BPL_ALIGN_SZ, 0);
+
+ if (!phba->lpfc_cmd_rsp_buf_pool)
+ goto fail_free_dma_buf_pool;
+
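Editor's note: buffers are later carved out of these pools with the standard dmapool API; a minimal sketch of the alloc/free pairing, with hypothetical local names:

	#include <linux/dmapool.h>

	dma_addr_t dma_handle;
	void *buf;

	/* one cfg_sg_dma_buf_size-sized, BPL_ALIGN_SZ-aligned buffer */
	buf = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL,
			     &dma_handle);
	if (!buf)
		return -ENOMEM;
	/* ... build the SGL in buf, hand dma_handle to the HBA ... */
	dma_pool_free(phba->lpfc_sg_dma_buf_pool, buf, dma_handle);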
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -6354,6 +6365,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
}
return 0;
+
+fail_free_dma_buf_pool:
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
+fail_free_mem:
+ lpfc_mem_free(phba);
+ return -ENOMEM;
}
/**
@@ -6414,6 +6432,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
+ /* Allocate all driver workqueues here */
+
+ /* The lpfc_wq workqueue for deferred irq use */
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+
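Editor's note: WQ_MEM_RECLAIM guarantees the workqueue a rescuer thread, so deferred-IRQ work can still make forward progress under memory pressure. The queueing idiom in isolation, with a hypothetical work item:

	static void lpfc_deferred_handler(struct work_struct *work)
	{
		/* bottom-half processing moved out of hard-IRQ context */
	}

	INIT_WORK(&deferred_work, lpfc_deferred_handler);
	queue_work(phba->wq, &deferred_work);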
/*
* Initialize timers used by driver
*/
@@ -6448,102 +6471,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* The WQ create will allocate the ring.
*/
- /*
- * 1 for cmd, 1 for rsp, NVME adds an extra one
- * for boundary conditions in its max_sgl_segment template.
- */
- extra = 2;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- extra++;
-
- /*
- * It doesn't matter what family our adapter is in, we are
- * limited to 2 Pages, 512 SGEs, for our SGL.
- * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
- */
- max_buf_size = (2 * SLI4_PAGE_SIZE);
-
- /*
- * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
- * used to create the sg_dma_buf_pool must be calculated.
- */
- if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
- /*
- * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
- * the FCP rsp, and a SGE. Sice we have no control
- * over how many protection segments the SCSI Layer
- * will hand us (ie: there could be one for every block
- * in the IO), just allocate enough SGEs to accomidate
- * our max amount and we need to limit lpfc_sg_seg_cnt
- * to minimize the risk of running out.
- */
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) + max_buf_size;
-
- /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
- phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
-
- /*
- * If supporting DIF, reduce the seg count for scsi to
- * allow room for the DIF sges.
- */
- if (phba->cfg_enable_bg &&
- phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
- phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
- else
- phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
-
- } else {
- /*
- * The scsi_buf for a regular I/O holds the FCP cmnd,
- * the FCP rsp, a SGE for each, and a SGE for up to
- * cfg_sg_seg_cnt data segments.
- */
- phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + extra) *
- sizeof(struct sli4_sge));
-
- /* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
- phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
-
- /*
- * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
- * need to post 1 page for the SGL.
- */
- }
-
- /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
- "6300 Reducing NVME sg segment "
- "cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
-
- /* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
-
- if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
- phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
- else
- phba->cfg_sg_dma_buf_size =
- SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9087 sg_seg_cnt:%d dmabuf_size:%d "
- "total:%d scsi:%d nvme:%d\n",
- phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
- phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
- phba->cfg_nvme_seg_cnt);
-
/* Initialize buffer queue management fields */
INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
@@ -6552,11 +6479,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/*
* Initialize the SLI Layer to run with lpfc SLI4 HBAs.
*/
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- /* Initialize the Abort scsi buffer list used by driver */
- spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
- }
+ /* Initialize the Abort buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Initialize the Abort nvme buffer list used by driver */
@@ -6764,6 +6689,131 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
+ /*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
+ * It doesn't matter what family our adapter is in, we are
+ * limited to 2 Pages, 512 SGEs, for our SGL.
+ * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+ */
+ max_buf_size = (2 * SLI4_PAGE_SIZE);
+
+ /*
+	 * Since lpfc_sg_seg_cnt is a module param, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be calculated.
+ */
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
+ /* Both cfg_enable_bg and cfg_external_dif code paths */
+
+ /*
+ * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
+		 * the FCP rsp, and an SGE. Since we have no control
+		 * over how many protection segments the SCSI Layer
+		 * will hand us (i.e. there could be one for every block
+		 * in the IO), just allocate enough SGEs to accommodate
+		 * our max amount, and we need to limit lpfc_sg_seg_cnt
+ * to minimize the risk of running out.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) + max_buf_size;
+
+ /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+ phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+ /*
+ * If supporting DIF, reduce the seg count for scsi to
+ * allow room for the DIF sges.
+ */
+ if (phba->cfg_enable_bg &&
+ phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+ phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+ else
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
+ } else {
+ /*
+ * The scsi_buf for a regular I/O holds the FCP cmnd,
+ * the FCP rsp, a SGE for each, and a SGE for up to
+ * cfg_sg_seg_cnt data segments.
+ */
+ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + extra) *
+ sizeof(struct sli4_sge));
+
+ /* Total SGEs for scsi_sg_list */
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
+ /*
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
+ * need to post 1 page for the SGL.
+ */
+ }
+
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
+ else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
+ phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+ else
+ phba->cfg_sg_dma_buf_size =
+ SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+ phba->border_sge_num = phba->cfg_sg_dma_buf_size /
+ sizeof(struct sli4_sge);
+
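Editor's note, worked example: assuming sizeof(struct sli4_sge) is 16 bytes and a 4 KiB SGL buffer, border_sge_num = 4096 / 16 = 256, i.e. every 256th SGE slot must be an LSP link to the next chained SGL page (see the XPSGL path in lpfc_nvme_prep_io_dma() later in this patch).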
+ /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6300 Reducing NVME sg segment "
+ "cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+
+ /* Initialize the host templates with the updated values. */
+ lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9087 sg_seg_cnt:%d dmabuf_size:%d "
+ "total:%d scsi:%d nvme:%d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+ phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
+
+ if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+ i = phba->cfg_sg_dma_buf_size;
+ else
+ i = SLI4_PAGE_SIZE;
+
+ phba->lpfc_sg_dma_buf_pool =
+ dma_pool_create("lpfc_sg_dma_buf_pool",
+ &phba->pcidev->dev,
+ phba->cfg_sg_dma_buf_size,
+ i, 0);
+ if (!phba->lpfc_sg_dma_buf_pool)
+ goto out_free_bsmbx;
+
+ phba->lpfc_cmd_rsp_buf_pool =
+ dma_pool_create("lpfc_cmd_rsp_buf_pool",
+ &phba->pcidev->dev,
+ sizeof(struct fcp_cmnd) +
+ sizeof(struct fcp_rsp),
+ i, 0);
+ if (!phba->lpfc_cmd_rsp_buf_pool)
+ goto out_free_sg_dma_buf;
+
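Editor's note: the alignment argument i resolves to the smaller of the buffer size and one SLI4 page; as a sketch, the if/else above is equivalent to:

	i = min_t(u32, phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);

Using the page size as an upper bound keeps pool allocations from demanding alignment larger than one SLI4 page.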
mempool_free(mboxq, phba->mbox_mem_pool);
/* Verify OAS is supported */
@@ -6775,12 +6825,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
- goto out_free_bsmbx;
+ goto out_free_cmd_rsp_buf;
/* Create driver internal CQE event pool */
rc = lpfc_sli4_cq_event_pool_create(phba);
if (rc)
- goto out_free_bsmbx;
+ goto out_free_cmd_rsp_buf;
/* Initialize sgl lists per host */
lpfc_init_sgl_list(phba);
@@ -6871,6 +6921,12 @@ out_free_active_sgl:
lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
lpfc_sli4_cq_event_pool_destroy(phba);
+out_free_cmd_rsp_buf:
+ dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+ phba->lpfc_cmd_rsp_buf_pool = NULL;
+out_free_sg_dma_buf:
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
@@ -6997,12 +7053,6 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
return error;
}
- /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */
- if (phba->sli_rev == LPFC_SLI_REV4)
- phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
- else
- phba->wq = NULL;
-
return 0;
}
@@ -7563,7 +7613,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
uint32_t old_mask;
uint32_t old_guard;
- int pagecnt = 10;
if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
@@ -7600,56 +7649,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
"layer, Bad protection parameters: %d %d\n",
old_mask, old_guard);
}
-
- if (!_dump_buf_data) {
- while (pagecnt) {
- spin_lock_init(&_dump_buf_lock);
- _dump_buf_data =
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
- if (_dump_buf_data) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9043 BLKGRD: allocated %d pages for "
- "_dump_buf_data at 0x%p\n",
- (1 << pagecnt), _dump_buf_data);
- _dump_buf_data_order = pagecnt;
- memset(_dump_buf_data, 0,
- ((1 << PAGE_SHIFT) << pagecnt));
- break;
- } else
- --pagecnt;
- }
- if (!_dump_buf_data_order)
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9044 BLKGRD: ERROR unable to allocate "
- "memory for hexdump\n");
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
- "\n", _dump_buf_data);
- if (!_dump_buf_dif) {
- while (pagecnt) {
- _dump_buf_dif =
- (char *) __get_free_pages(GFP_KERNEL, pagecnt);
- if (_dump_buf_dif) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9046 BLKGRD: allocated %d pages for "
- "_dump_buf_dif at 0x%p\n",
- (1 << pagecnt), _dump_buf_dif);
- _dump_buf_dif_order = pagecnt;
- memset(_dump_buf_dif, 0,
- ((1 << PAGE_SHIFT) << pagecnt));
- break;
- } else
- --pagecnt;
- }
- if (!_dump_buf_dif_order)
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9047 BLKGRD: ERROR unable to allocate "
- "memory for hexdump\n");
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
- _dump_buf_dif);
}
/**
@@ -8309,6 +8308,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+ /* Reduce resource usage in kdump environment */
+ if (is_kdump_kernel() &&
+ phba->sli4_hba.max_cfg_param.max_xri > 512)
+ phba->sli4_hba.max_cfg_param.max_xri = 512;
phba->sli4_hba.max_cfg_param.xri_base =
bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
phba->sli4_hba.max_cfg_param.max_vpi =
@@ -8382,11 +8385,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
*/
qmin -= 4;
- /* If NVME is configured, double the number of CQ/WQs needed */
- if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
- !phba->nvmet_support)
- qmin /= 2;
-
/* Check to see if there is enough for NVME */
if ((phba->cfg_irq_chann > qmin) ||
(phba->cfg_hdw_queue > qmin)) {
@@ -8643,51 +8641,14 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
}
static int
-lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
-{
- struct lpfc_queue *qdesc;
- int cpu;
-
- cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
- phba->sli4_hba.cq_esize,
- LPFC_CQE_EXP_COUNT, cpu);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0508 Failed allocate fast-path NVME CQ (%d)\n",
- wqidx);
- return 1;
- }
- qdesc->qe_valid = 1;
- qdesc->hdwq = wqidx;
- qdesc->chann = cpu;
- phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc;
-
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
- LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT,
- cpu);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0509 Failed allocate fast-path NVME WQ (%d)\n",
- wqidx);
- return 1;
- }
- qdesc->hdwq = wqidx;
- qdesc->chann = wqidx;
- phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc;
- list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
- return 0;
-}
-
-static int
-lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
+lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
+ u32 wqesize;
int cpu;
- cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ);
- /* Create Fast Path FCP CQs */
+ cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
+ /* Create Fast Path IO CQs */
if (phba->enab_exp_wqcq_pages)
/* Increase the CQ size when WQEs contain an embedded cdb */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
@@ -8700,15 +8661,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
phba->sli4_hba.cq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
+ "0499 Failed allocate fast-path IO CQ (%d)\n", idx);
return 1;
}
qdesc->qe_valid = 1;
- qdesc->hdwq = wqidx;
+ qdesc->hdwq = idx;
qdesc->chann = cpu;
- phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc;
+ phba->sli4_hba.hdwq[idx].io_cq = qdesc;
- /* Create Fast Path FCP WQs */
+ /* Create Fast Path IO WQs */
if (phba->enab_exp_wqcq_pages) {
/* Increase the WQ size when WQEs contain an embedded cdb */
wqesize = (phba->fcp_embed_io) ?
@@ -8723,13 +8684,13 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0503 Failed allocate fast-path FCP WQ (%d)\n",
- wqidx);
+ "0503 Failed allocate fast-path IO WQ (%d)\n",
+ idx);
return 1;
}
- qdesc->hdwq = wqidx;
- qdesc->chann = wqidx;
- phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc;
+ qdesc->hdwq = idx;
+ qdesc->chann = cpu;
+ phba->sli4_hba.hdwq[idx].io_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
return 0;
}
@@ -8793,12 +8754,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qp->get_io_bufs = 0;
qp->put_io_bufs = 0;
qp->total_io_bufs = 0;
- spin_lock_init(&qp->abts_scsi_buf_list_lock);
- INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list);
+ spin_lock_init(&qp->abts_io_buf_list_lock);
+ INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs = 0;
- spin_lock_init(&qp->abts_nvme_buf_list_lock);
- INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list);
qp->abts_nvme_io_bufs = 0;
+ INIT_LIST_HEAD(&qp->sgl_list);
+ INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
+ spin_lock_init(&qp->hdwq_lock);
}
}
@@ -8864,7 +8826,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
qdesc->qe_valid = 1;
qdesc->hdwq = cpup->hdwq;
- qdesc->chann = cpu; /* First CPU this EQ is affinitised to */
+ qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
qdesc->last_cpu = qdesc->chann;
/* Save the allocated EQ in the Hardware Queue */
@@ -8895,41 +8857,31 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
}
- /* Allocate SCSI SLI4 CQ/WQs */
+ /* Allocate IO Path SLI4 CQ/WQs */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- if (lpfc_alloc_fcp_wq_cq(phba, idx))
+ if (lpfc_alloc_io_wq_cq(phba, idx))
goto out_error;
}
- /* Allocate NVME SLI4 CQ/WQs */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- if (lpfc_alloc_nvme_wq_cq(phba, idx))
- goto out_error;
- }
-
- if (phba->nvmet_support) {
- for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
- cpu = lpfc_find_cpu_handle(phba, idx,
- LPFC_FIND_BY_HDWQ);
- qdesc = lpfc_sli4_queue_alloc(
- phba,
+ if (phba->nvmet_support) {
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ cpu = lpfc_find_cpu_handle(phba, idx,
+ LPFC_FIND_BY_HDWQ);
+ qdesc = lpfc_sli4_queue_alloc(phba,
LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount,
cpu);
- if (!qdesc) {
- lpfc_printf_log(
- phba, KERN_ERR, LOG_INIT,
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
"CQ Set (%d)\n", idx);
- goto out_error;
- }
- qdesc->qe_valid = 1;
- qdesc->hdwq = idx;
- qdesc->chann = cpu;
- phba->sli4_hba.nvmet_cqset[idx] = qdesc;
+ goto out_error;
}
+ qdesc->qe_valid = 1;
+ qdesc->hdwq = idx;
+ qdesc->chann = cpu;
+ phba->sli4_hba.nvmet_cqset[idx] = qdesc;
}
}
@@ -8960,7 +8912,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
qdesc->qe_valid = 1;
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.els_cq = qdesc;
@@ -8978,7 +8930,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"0505 Failed allocate slow-path MQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.mbx_wq = qdesc;
/*
@@ -8994,7 +8946,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"0504 Failed allocate slow-path ELS WQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.els_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
@@ -9008,7 +8960,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"6079 Failed allocate NVME LS CQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
qdesc->qe_valid = 1;
phba->sli4_hba.nvmels_cq = qdesc;
@@ -9021,7 +8973,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
"6080 Failed allocate NVME LS WQ\n");
goto out_error;
}
- qdesc->chann = 0;
+ qdesc->chann = cpu;
phba->sli4_hba.nvmels_wq = qdesc;
list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
}
@@ -9164,15 +9116,13 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
/* Loop thru all Hardware Queues */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
/* Free the CQ/WQ corresponding to the Hardware Queue */
- lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
- lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
- lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
- lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
- hdwq[idx].hba_eq = NULL;
- hdwq[idx].fcp_cq = NULL;
- hdwq[idx].nvme_cq = NULL;
- hdwq[idx].fcp_wq = NULL;
- hdwq[idx].nvme_wq = NULL;
+ lpfc_sli4_queue_free(hdwq[idx].io_cq);
+ lpfc_sli4_queue_free(hdwq[idx].io_wq);
+ hdwq[idx].io_cq = NULL;
+ hdwq[idx].io_wq = NULL;
+ if (phba->cfg_xpsgl && !phba->nvmet_support)
+ lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
+ lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
}
/* Loop thru all IRQ vectors */
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
@@ -9372,8 +9322,7 @@ lpfc_setup_cq_lookup(struct lpfc_hba *phba)
list_for_each_entry(childq, &eq->child_list, list) {
if (childq->queue_id > phba->sli4_hba.cq_max)
continue;
- if ((childq->subtype == LPFC_FCP) ||
- (childq->subtype == LPFC_NVME))
+ if (childq->subtype == LPFC_IO)
phba->sli4_hba.cq_lookup[childq->queue_id] =
childq;
}
@@ -9499,31 +9448,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
/* Loop thru all Hardware Queues */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- cpu = lpfc_find_cpu_handle(phba, qidx,
- LPFC_FIND_BY_HDWQ);
- cpup = &phba->sli4_hba.cpu_map[cpu];
-
- /* Create the CQ/WQ corresponding to the
- * Hardware Queue
- */
- rc = lpfc_create_wq_cq(phba,
- phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
- qp[qidx].nvme_cq,
- qp[qidx].nvme_wq,
- &phba->sli4_hba.hdwq[qidx].nvme_cq_map,
- qidx, LPFC_NVME);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6123 Failed to setup fastpath "
- "NVME WQ/CQ (%d), rc = 0x%x\n",
- qidx, (uint32_t)rc);
- goto out_destroy;
- }
- }
- }
-
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
cpup = &phba->sli4_hba.cpu_map[cpu];
@@ -9531,14 +9455,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/* Create the CQ/WQ corresponding to the Hardware Queue */
rc = lpfc_create_wq_cq(phba,
phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
- qp[qidx].fcp_cq,
- qp[qidx].fcp_wq,
- &phba->sli4_hba.hdwq[qidx].fcp_cq_map,
- qidx, LPFC_FCP);
+ qp[qidx].io_cq,
+ qp[qidx].io_wq,
+ &phba->sli4_hba.hdwq[qidx].io_cq_map,
+ qidx,
+ LPFC_IO);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0535 Failed to setup fastpath "
- "FCP WQ/CQ (%d), rc = 0x%x\n",
+ "IO WQ/CQ (%d), rc = 0x%x\n",
qidx, (uint32_t)rc);
goto out_destroy;
}
@@ -9838,10 +9763,8 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
/* Destroy the CQ/WQ corresponding to Hardware Queue */
qp = &phba->sli4_hba.hdwq[qidx];
- lpfc_wq_destroy(phba, qp->fcp_wq);
- lpfc_wq_destroy(phba, qp->nvme_wq);
- lpfc_cq_destroy(phba, qp->fcp_cq);
- lpfc_cq_destroy(phba, qp->nvme_cq);
+ lpfc_wq_destroy(phba, qp->io_wq);
+ lpfc_cq_destroy(phba, qp->io_cq);
}
/* Loop thru all IRQ vectors */
for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
@@ -10711,7 +10634,7 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
- int i, cpu, idx, new_cpu, start_cpu, first_cpu;
+ int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
int max_phys_id, min_phys_id;
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
@@ -10753,8 +10676,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3328 CPU physid %d coreid %d\n",
- cpup->phys_id, cpup->core_id);
+ "3328 CPU %d physid %d coreid %d flag x%x\n",
+ cpu, cpup->phys_id, cpup->core_id, cpup->flag);
if (cpup->phys_id > max_phys_id)
max_phys_id = cpup->phys_id;
@@ -10812,17 +10735,17 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
cpup->eq = idx;
cpup->irq = pci_irq_vector(phba->pcidev, idx);
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3336 Set Affinity: CPU %d "
- "irq %d eq %d\n",
- cpu, cpup->irq, cpup->eq);
-
/* If this is the first CPU that's assigned to this
* vector, set LPFC_CPU_FIRST_IRQ.
*/
if (!i)
cpup->flag |= LPFC_CPU_FIRST_IRQ;
i++;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3336 Set Affinity: CPU %d "
+ "irq %d eq %d flag x%x\n",
+ cpu, cpup->irq, cpup->eq, cpup->flag);
}
}
@@ -10936,69 +10859,103 @@ found_any:
}
}
+ /* Assign hdwq indices that are unique across all cpus in the map
+ * that are also FIRST_CPUs.
+ */
+ idx = 0;
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Only FIRST IRQs get a hdwq index assignment. */
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ continue;
+
+ /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
+ cpup->hdwq = idx;
+ idx++;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3333 Set Affinity: CPU %d (phys %d core %d): "
+ "hdwq %d eq %d irq %d flg x%x\n",
+ cpu, cpup->phys_id, cpup->core_id,
+ cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ }
/* Finally we need to associate a hdwq with each cpu_map entry
* This will be 1 to 1 - hdwq to cpu, unless there are less
* hardware queues than CPUs. For that case we will just round-robin
* the available hardware queues as they get assigned to CPUs.
+ * The next_idx is the idx from the FIRST_CPU loop above to account
+ * for irq_chann < hdwq. The idx is used for round-robin assignments
+ * and needs to start at 0.
*/
- idx = 0;
+ next_idx = idx;
start_cpu = 0;
+ idx = 0;
for_each_present_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
- if (idx >= phba->cfg_hdw_queue) {
- /* We need to reuse a Hardware Queue for another CPU,
- * so be smart about it and pick one that has its
- * IRQ/EQ mapped to the same phys_id (CPU package).
- * and core_id.
- */
- new_cpu = start_cpu;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
- if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
- (new_cpup->phys_id == cpup->phys_id) &&
- (new_cpup->core_id == cpup->core_id))
- goto found_hdwq;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
- new_cpu = first_cpu;
- }
- /* If we can't match both phys_id and core_id,
- * settle for just a phys_id match.
- */
- new_cpu = start_cpu;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
- if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
- (new_cpup->phys_id == cpup->phys_id))
- goto found_hdwq;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu == nr_cpumask_bits)
- new_cpu = first_cpu;
+ /* FIRST cpus are already mapped. */
+ if (cpup->flag & LPFC_CPU_FIRST_IRQ)
+ continue;
+
+ /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
+ * of the unassigned cpus to the next idx so that all
+ * hdw queues are fully utilized.
+ */
+ if (next_idx < phba->cfg_hdw_queue) {
+ cpup->hdwq = next_idx;
+ next_idx++;
+ continue;
+ }
+
+ /* Not a First CPU and all hdw_queues are used. Reuse a
+ * Hardware Queue for another CPU, so be smart about it
+ * and pick one that has its IRQ/EQ mapped to the same phys_id
+ * (CPU package) and core_id.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+ new_cpup->phys_id == cpup->phys_id &&
+ new_cpup->core_id == cpup->core_id) {
+ goto found_hdwq;
}
+ new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
- /* Otherwise just round robin on cfg_hdw_queue */
- cpup->hdwq = idx % phba->cfg_hdw_queue;
- goto logit;
-found_hdwq:
- /* We found an available entry, copy the IRQ info */
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu == nr_cpumask_bits)
- start_cpu = first_cpu;
- cpup->hdwq = new_cpup->hdwq;
- } else {
- /* 1 to 1, CPU to hdwq */
- cpup->hdwq = idx;
+ /* If we can't match both phys_id and core_id,
+ * settle for just a phys_id match.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
+ new_cpup->phys_id == cpup->phys_id)
+ goto found_hdwq;
+
+ new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
}
-logit:
+
+ /* Otherwise just round robin on cfg_hdw_queue */
+ cpup->hdwq = idx % phba->cfg_hdw_queue;
+ idx++;
+ goto logit;
+ found_hdwq:
+ /* We found an available entry, copy the IRQ info */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+ cpup->hdwq = new_cpup->hdwq;
+ logit:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Set Affinity: CPU %d (phys %d core %d): "
"hdwq %d eq %d irq %d flg x%x\n",
cpu, cpup->phys_id, cpup->core_id,
cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
- idx++;
}
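Editor's note: the two loops above implement a two-phase mapping. A simplified, self-contained model of the policy (plain arrays stand in for lpfc_vector_map_info; the NUMA/phys_id/core_id matching from the middle section is omitted):

	/* Phase 1: each IRQ-owning (FIRST_IRQ) CPU gets a unique hdwq.
	 * Phase 2: remaining CPUs consume any still-unused hdwqs, then
	 * round-robin over the full set.
	 */
	static void assign_hdwq_model(int ncpu, int nhdwq,
				      const bool *first_irq, int *hdwq)
	{
		int cpu, idx = 0, next_idx;

		for (cpu = 0; cpu < ncpu; cpu++)
			if (first_irq[cpu])
				hdwq[cpu] = idx++;

		next_idx = idx;			/* first hdwq not yet claimed */
		for (cpu = 0, idx = 0; cpu < ncpu; cpu++) {
			if (first_irq[cpu])
				continue;
			if (next_idx < nhdwq)
				hdwq[cpu] = next_idx++;	   /* fill idle hdwqs */
			else
				hdwq[cpu] = idx++ % nhdwq; /* then wrap around */
		}
	}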
/* The cpu_map array will be used later during initialization
@@ -11089,10 +11046,10 @@ vec_fail_out:
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI interrupt mode to device with
- * SLI-4 interface spec. The kernel function pci_enable_msi() is called
- * to enable the MSI vector. The device driver is responsible for calling
- * the request_irq() to register MSI vector with a interrupt the handler,
- * which is done in this function.
+ * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
+ * called to enable the MSI vector. The device driver is responsible for
+ * calling the request_irq() to register the MSI vector with an
+ * interrupt handler, which is done in this function.
*
* Return codes
* 0 - successful
@@ -11103,20 +11060,21 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
int rc, index;
- rc = pci_enable_msi(phba->pcidev);
- if (!rc)
+ rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
+ PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
+ if (rc > 0)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0487 PCI enable MSI mode success.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0488 PCI enable MSI mode failed (%d)\n", rc);
- return rc;
+ return rc ? rc : -1;
}
rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
0, LPFC_DRIVER_NAME, phba);
if (rc) {
- pci_disable_msi(phba->pcidev);
+ pci_free_irq_vectors(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0490 MSI request_irq failed (%d)\n", rc);
return rc;
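Editor's note: pci_alloc_irq_vectors() returns the number of vectors allocated (>= 1) on success or a negative errno, which is why the success test above is rc > 0 rather than !rc as with the old pci_enable_msi(). The pattern in isolation, with hypothetical names:

	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 1,
				     PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;		/* MSI not available */

	rc = request_irq(pci_irq_vector(pdev, 0), my_handler, 0,
			 "my_drv", drv_data);
	if (rc)
		pci_free_irq_vectors(pdev);

For a single MSI vector, pci_irq_vector(pdev, 0) and pdev->irq name the same interrupt, so the driver's continued use of phba->pcidev->irq is equivalent.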
@@ -11282,11 +11240,10 @@ static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *qp;
- int idx, ccnt, fcnt;
+ int idx, ccnt;
int wait_time = 0;
int io_xri_cmpl = 1;
int nvmet_xri_cmpl = 1;
- int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
/* Driver just aborted IOs during the hba_unset process. Pause
@@ -11300,32 +11257,21 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
lpfc_nvme_wait_for_io_drain(phba);
ccnt = 0;
- fcnt = 0;
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- fcp_xri_cmpl = list_empty(
- &qp->lpfc_abts_scsi_buf_list);
- if (!fcp_xri_cmpl) /* if list is NOT empty */
- fcnt++;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- io_xri_cmpl = list_empty(
- &qp->lpfc_abts_nvme_buf_list);
- if (!io_xri_cmpl) /* if list is NOT empty */
- ccnt++;
- }
+ io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
+ if (!io_xri_cmpl) /* if list is NOT empty */
+ ccnt++;
}
if (ccnt)
io_xri_cmpl = 0;
- if (fcnt)
- fcp_xri_cmpl = 0;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
nvmet_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
}
- while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl ||
- !nvmet_xri_cmpl) {
+ while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
if (!nvmet_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11334,12 +11280,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
wait_time/1000);
if (!io_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6100 NVME XRI exchange busy "
- "wait time: %d seconds.\n",
- wait_time/1000);
- if (!fcp_xri_cmpl)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2877 FCP XRI exchange busy "
+ "6100 IO XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
if (!els_xri_cmpl)
@@ -11355,24 +11296,15 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
}
ccnt = 0;
- fcnt = 0;
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- fcp_xri_cmpl = list_empty(
- &qp->lpfc_abts_scsi_buf_list);
- if (!fcp_xri_cmpl) /* if list is NOT empty */
- fcnt++;
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- io_xri_cmpl = list_empty(
- &qp->lpfc_abts_nvme_buf_list);
- if (!io_xri_cmpl) /* if list is NOT empty */
- ccnt++;
- }
+ io_xri_cmpl = list_empty(
+ &qp->lpfc_abts_io_buf_list);
+ if (!io_xri_cmpl) /* if list is NOT empty */
+ ccnt++;
}
if (ccnt)
io_xri_cmpl = 0;
- if (fcnt)
- fcp_xri_cmpl = 0;
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
nvmet_xri_cmpl = list_empty(
@@ -11616,6 +11548,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+ /* Check for Extended Pre-Registered SGL support */
+ phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
+
/* Check for firmware nvme support */
rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
bf_get(cfg_xib, mbx_sli4_parameters));
@@ -11646,6 +11581,7 @@ fcponly:
phba->nvme_support = 0;
phba->nvmet_support = 0;
phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvme_seg_cnt = 0;
/* If no FC4 type support, move to just SCSI support */
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
@@ -11654,6 +11590,15 @@ fcponly:
}
}
+ /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+ * accommodate 512K and 1M IOs in a single nvme buf and supply
+ * enough NVME LS iocb buffers for larger connectivity counts.
+ */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ phba->cfg_iocb_cnt = 5;
+ }
+
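Editor's note, worked example of the sizing goal: with 4 KiB pages, a 1 MiB I/O needs 1048576 / 4096 = 256 data segments (the 512K case needs 128), so scaling cfg_sg_seg_cnt up to LPFC_MAX_NVME_SEG_CNT lets a single nvme buffer describe such an I/O without splitting.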
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
@@ -11718,6 +11663,14 @@ fcponly:
else
phba->mds_diags_support = 0;
+ /*
+ * Check if the SLI port supports NSLER
+ */
+ if (bf_get(cfg_nsler, mbx_sli4_parameters))
+ phba->nsler = 1;
+ else
+ phba->nsler = 0;
+
return 0;
}
@@ -12146,7 +12099,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
lpfc_scsi_dev_block(phba);
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
- lpfc_sli_flush_fcp_rings(phba);
+ lpfc_sli_flush_io_rings(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -12176,7 +12129,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
lpfc_stop_hba_timers(phba);
/* Clean up all driver's outstanding SCSI I/Os */
- lpfc_sli_flush_fcp_rings(phba);
+ lpfc_sli_flush_io_rings(phba);
}
/**
@@ -12948,12 +12901,8 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
- /* Flush all driver's outstanding SCSI I/Os as we are to reset */
- lpfc_sli_flush_fcp_rings(phba);
-
- /* Flush the outstanding NVME IOs if fc4 type enabled. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
+ /* Flush all driver's outstanding I/Os as we are to reset */
+ lpfc_sli_flush_io_rings(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -12984,12 +12933,8 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
/* stop all timers */
lpfc_stop_hba_timers(phba);
- /* Clean up all driver's outstanding SCSI I/Os */
- lpfc_sli_flush_fcp_rings(phba);
-
- /* Flush the outstanding NVME IOs if fc4 type enabled. */
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- lpfc_sli_flush_nvme_rings(phba);
+ /* Clean up all driver's outstanding I/Os */
+ lpfc_sli_flush_io_rings(phba);
}
/**
@@ -13530,19 +13475,6 @@ lpfc_exit(void)
pci_unregister_driver(&lpfc_driver);
fc_release_transport(lpfc_transport_template);
fc_release_transport(lpfc_vport_transport_template);
- if (_dump_buf_data) {
- printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
- "_dump_buf_data at 0x%p\n",
- (1L << _dump_buf_data_order), _dump_buf_data);
- free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
- }
-
- if (_dump_buf_dif) {
- printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
- "_dump_buf_dif at 0x%p\n",
- (1L << _dump_buf_dif_order), _dump_buf_dif);
- free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
- }
idr_destroy(&lpfc_hba_index);
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 66191fa35f63..ae09bb863497 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -72,8 +72,8 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
* lpfc_mem_alloc - create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
- * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
+ * Description: Creates and allocates PCI pools lpfc_mbuf_pool,
+ * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
* Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -89,36 +89,12 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
int i;
- if (phba->sli_rev == LPFC_SLI_REV4) {
- /* Calculate alignment */
- if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
- i = phba->cfg_sg_dma_buf_size;
- else
- i = SLI4_PAGE_SIZE;
-
- phba->lpfc_sg_dma_buf_pool =
- dma_pool_create("lpfc_sg_dma_buf_pool",
- &phba->pcidev->dev,
- phba->cfg_sg_dma_buf_size,
- i, 0);
- if (!phba->lpfc_sg_dma_buf_pool)
- goto fail;
-
- } else {
- phba->lpfc_sg_dma_buf_pool =
- dma_pool_create("lpfc_sg_dma_buf_pool",
- &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
- align, 0);
-
- if (!phba->lpfc_sg_dma_buf_pool)
- goto fail;
- }
phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
LPFC_BPL_SIZE,
align, 0);
if (!phba->lpfc_mbuf_pool)
- goto fail_free_dma_buf_pool;
+ goto fail;
pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
sizeof(struct lpfc_dmabuf),
@@ -208,9 +184,6 @@ fail_free_drb_pool:
fail_free_lpfc_mbuf_pool:
dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
- fail_free_dma_buf_pool:
- dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
- phba->lpfc_sg_dma_buf_pool = NULL;
fail:
return -ENOMEM;
}
@@ -248,25 +221,22 @@ lpfc_mem_free(struct lpfc_hba *phba)
/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
- if (phba->lpfc_nvmet_drb_pool)
- dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
+ dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
phba->lpfc_nvmet_drb_pool = NULL;
- if (phba->lpfc_drb_pool)
- dma_pool_destroy(phba->lpfc_drb_pool);
+
+ dma_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
- if (phba->lpfc_hrb_pool)
- dma_pool_destroy(phba->lpfc_hrb_pool);
+
+ dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
- if (phba->txrdy_payload_pool)
- dma_pool_destroy(phba->txrdy_payload_pool);
+
+ dma_pool_destroy(phba->txrdy_payload_pool);
phba->txrdy_payload_pool = NULL;
- if (phba->lpfc_hbq_pool)
- dma_pool_destroy(phba->lpfc_hbq_pool);
+ dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;
- if (phba->rrq_pool)
- mempool_destroy(phba->rrq_pool);
+ mempool_destroy(phba->rrq_pool);
phba->rrq_pool = NULL;
/* Free NLP memory pool */
@@ -290,10 +260,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
- /* Free DMA buffer memory pool */
- dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
- phba->lpfc_sg_dma_buf_pool = NULL;
-
/* Free Device Data memory pool */
if (phba->device_data_mem_pool) {
/* Ensure all objects have been returned to the pool */
@@ -366,6 +332,13 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
/* Free and destroy all the allocated memory pools */
lpfc_mem_free(phba);
+ /* Free DMA buffer memory pool */
+ dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
+
+ dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
+ phba->lpfc_cmd_rsp_buf_pool = NULL;
+
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 59252bfca14e..f4b879d25fe9 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -614,7 +614,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
out:
/* If we are authenticated, move to the proper state */
- if (ndlp->nlp_type & NLP_FCP_TARGET)
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
else
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -799,9 +799,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->writeXferRdyDis)
ndlp->nlp_flag |= NLP_FIRSTBURST;
}
- if (npr->Retry)
+ if (npr->Retry && ndlp->nlp_type &
+ (NLP_FCP_INITIATOR | NLP_FCP_TARGET))
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ if (npr->Retry && phba->nsler &&
+ ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
+ ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+
+
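Editor's note: the two tests above split the PRLI Retry bit by FC4 type; condensed as an equivalent sketch (field names as in the driver):

	if (npr->Retry) {
		/* FCP-2 error recovery for FCP initiators/targets */
		if (ndlp->nlp_type & (NLP_FCP_INITIATOR | NLP_FCP_TARGET))
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
		/* NVMe SLER only when the SLI port advertises support */
		if (phba->nsler &&
		    (ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET)))
			ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
	}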
/* If this driver is in nvme target mode, set the ndlp's fc4
* type to NVME provided the PRLI response claims NVME FC4
* type. Target mode does not issue gft_id so doesn't get
@@ -885,7 +891,7 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1435 release_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
- "Data: %p\n",
+ "Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp);
@@ -1661,6 +1667,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
struct lpfc_dmabuf *mp;
+ struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1693,6 +1700,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
}
spin_unlock_irq(&phba->hbalock);
+ /* software abort if any GID_FT is outstanding */
+ if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
+ ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
+ if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
+ lpfc_els_abort(phba, ns_ndlp);
+ }
+
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1814,7 +1828,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ if (lpfc_issue_els_prli(vport, ndlp, 0)) {
+ lpfc_issue_els_logo(vport, ndlp, 0);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
} else {
if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
phba->targetport->port_id = vport->fc_myDID;
@@ -2012,6 +2030,11 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;
+ if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
+ ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
+ else
+ ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+
/* Target driver cannot solicit NVME FB. */
if (bf_get_be32(prli_tgt, nvpr)) {
/* Complete the nvme target roles. The transport
@@ -2891,18 +2914,21 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
uint32_t got_ndlp = 0;
+ uint32_t data1;
if (lpfc_nlp_get(ndlp))
got_ndlp = 1;
cur_state = ndlp->nlp_state;
+ data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+ ((uint32_t)ndlp->nlp_type));
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0211 DSM in event x%x on NPort x%x in "
"state %d rpi x%x Data: x%x x%x\n",
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
- ndlp->nlp_flag, ndlp->nlp_fc4_type);
+ ndlp->nlp_flag, data1);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM in: evt:%d ste:%d did:x%x",
@@ -2913,10 +2939,13 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* DSM out state <rc> on NPort <nlp_DID> */
if (got_ndlp) {
+ data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
+ ((uint32_t)ndlp->nlp_type));
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0212 DSM out state %d on NPort x%x "
- "rpi x%x Data: x%x\n",
- rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag);
+ "rpi x%x Data: x%x x%x\n",
+ rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
+ data1);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 946642cee3df..a227e36cbdc2 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6073 Binding %s HdwQueue %d (cpu %d) to "
- "hdw_queue %d qhandle %p\n", str,
+ "hdw_queue %d qhandle x%px\n", str,
qidx, qhandle->cpu_id, qhandle->index, qhandle);
*handle = (void *)qhandle;
return 0;
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
+ "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
lport, qidx, handle);
kfree(handle);
}
@@ -293,7 +293,7 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
struct lpfc_nvme_lport *lport = localport->private;
lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
- "6173 localport %p delete complete\n",
+ "6173 localport x%px delete complete\n",
lport);
/* release any threads waiting for the unreg to complete */
@@ -332,7 +332,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete of remoteport %p\n",
+ "6146 remoteport delete of remoteport x%px\n",
remoteport);
spin_lock_irq(&vport->phba->hbalock);
@@ -383,8 +383,8 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
- "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
- "lsreg:%p bmp:%p ndlp:%p\n",
+ "Data %px DID %x Xri: %x status %x reason x%x "
+ "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
@@ -404,7 +404,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
else
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6046 nvme cmpl without done call back? "
- "Data %p DID %x Xri: %x status %x\n",
+ "Data %px DID %x Xri: %x status %x\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status);
if (ndlp) {
@@ -436,6 +436,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
return 1;
wqe = &genwqe->wqe;
+ /* Initialize only 64 bytes */
memset(wqe, 0, sizeof(union lpfc_wqe));
genwqe->context3 = (uint8_t *)bmp;
@@ -516,7 +517,8 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6050 Issue GEN REQ WQE to NPORT x%x "
- "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
+ "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
+ "xmit:%d 1st:%d\n",
ndlp->nlp_DID, genwqe->iotag,
vport->port_state,
genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
@@ -594,7 +596,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6051 Remoteport %p, rport has invalid ndlp. "
+ "6051 Remoteport x%px, rport has invalid ndlp. "
"Failing LS Req\n", pnvme_rport);
return -ENODEV;
}
@@ -646,10 +648,10 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
- "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
- ndlp->nlp_DID,
- pnvme_lport, pnvme_rport,
+ "6149 Issue LS Req to DID 0x%06x lport x%px, "
+ "rport x%px lsreq x%px rqstlen:%d rsplen:%d "
+ "%pad %pad\n",
+ ndlp->nlp_DID, pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
@@ -665,8 +667,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (ret != WQE_SUCCESS) {
atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6052 EXIT. issue ls wqe failed lport %p, "
- "rport %p lsreq%p Status %x DID %x\n",
+ "6052 EXIT. issue ls wqe failed lport x%px, "
+ "rport x%px lsreq x%px Status %x DID %x\n",
pnvme_lport, pnvme_rport, pnvme_lsreq,
ret, ndlp->nlp_DID);
lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
@@ -723,7 +725,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
- "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
+ "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
"rsplen:%d %pad %pad\n",
pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
@@ -984,8 +986,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (!lpfc_ncmd->nvmeCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
- "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
- "nvmeCmd %p\n",
+ "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
+ "nvmeCmd x%px\n",
lpfc_ncmd, lpfc_ncmd->nvmeCmd);
/* Release the lpfc_ncmd regardless of the missing elements. */
@@ -998,9 +1000,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
- if (vport->localport) {
+ if (unlikely(status && vport->localport)) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
- if (lport && status) {
+ if (lport) {
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_fcp_xb);
atomic_inc(&lport->cmpl_fcp_err);
@@ -1100,8 +1102,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_IOERR,
- "6032 Delay Aborted cmd %p "
- "nvme cmd %p, xri x%x, "
+ "6032 Delay Aborted cmd x%px "
+ "nvme cmd x%px, xri x%x, "
"xb %d\n",
lpfc_ncmd, nCmd,
lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -1140,7 +1142,7 @@ out_err:
phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
lpfc_nvme_ktime(phba, lpfc_ncmd);
}
- if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+ if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
uint32_t cpu;
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
cpu = raw_smp_processor_id();
@@ -1253,6 +1255,9 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
sizeof(uint32_t) * 8);
cstat->control_requests++;
}
+
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -1304,14 +1309,16 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
struct scatterlist *data_sg;
struct sli4_sge *first_data_sgl;
struct ulp_bde64 *bde;
- dma_addr_t physaddr;
+ dma_addr_t physaddr = 0;
uint32_t num_bde = 0;
- uint32_t dma_len;
+ uint32_t dma_len = 0;
uint32_t dma_offset = 0;
- int nseg, i;
+ int nseg, i, j;
+ bool lsp_just_set = false;
/* Fix up the command and response DMA stuff. */
lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
@@ -1348,6 +1355,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
*/
nseg = nCmd->sg_cnt;
data_sg = nCmd->first_sgl;
+
+ /* for tracking the segment boundaries */
+ j = 2;
for (i = 0; i < nseg; i++) {
if (data_sg == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1356,23 +1366,76 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
lpfc_ncmd->seg_cnt = 0;
return 1;
}
- physaddr = data_sg->dma_address;
- dma_len = data_sg->length;
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((num_bde + 1) == nseg)
+
+ sgl->word2 = 0;
+ if ((num_bde + 1) == nseg) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ } else {
bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(dma_len);
-
- dma_offset += dma_len;
- data_sg = sg_next(data_sg);
- sgl++;
+
+ /* expand the segment */
+ if (!lsp_just_set &&
+ !((j + 1) % phba->border_sge_num) &&
+ ((nseg - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(
+ phba, lpfc_ncmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_ncmd->seg_cnt = 0;
+ return 1;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ }
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+ LPFC_SGE_TYPE_LSP)) {
+ if ((nseg - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+ physaddr = data_sg->dma_address;
+ dma_len = data_sg->length;
+ sgl->addr_lo = cpu_to_le32(
+ putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(
+ putPaddrHigh(physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ data_sg = sg_next(data_sg);
+
+ sgl++;
+
+ lsp_just_set = false;
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
}
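Editor's note, worked trace of the chaining above, assuming border_sge_num == 256 (4 KiB page / 16-byte SGE): j counts SGE slots and starts at 2 to account for the cmd and rsp SGEs already in the embedded SGL; while (j + 1) % 256 != 0, data SGEs are emitted normally; at slot 255 the SGE is written as type LSP pointing at a fresh page from lpfc_get_sgl_per_hdwq(), sgl is re-aimed at that page, and i is decremented so the pending scatterlist entry is replayed there. In comment form:

	/* j = 2..254 : data SGEs in the embedded SGL
	 * j = 255    : (j+1) % 256 == 0 -> LSP SGE links to sgl_xtra page;
	 *              i-- replays the current scatter entry on the new page
	 * j = 256... : data SGEs continue in the chained page
	 */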
if (phba->cfg_enable_pbde) {
/* Use PBDE support for first SGL only, offset == 0 */
@@ -1474,7 +1537,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
- if (vport->load_flag & FC_UNLOADING) {
+ if (unlikely(vport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -1505,8 +1568,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
- "6053 Fail IO, ndlp not ready: rport %p "
- "ndlp %p, DID x%06x\n",
+ "6053 Busy IO, ndlp not ready: rport x%px "
+ "ndlp x%px, DID x%06x\n",
rport, ndlp, pnvme_rport->port_id);
atomic_inc(&lport->xmt_fcp_err);
ret = -EBUSY;
@@ -1758,7 +1821,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6002 Abort Request to rport DID x%06x "
- "for nvme_fc_req %p\n",
+ "for nvme_fc_req x%px\n",
pnvme_rport->port_id,
pnvme_fcreq);
@@ -1767,7 +1830,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6139 Driver in reset cleanup - flushing "
@@ -1805,8 +1868,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6143 NVME req mismatch: "
- "lpfc_nbuf %p nvmeCmd %p, "
- "pnvme_fcreq %p. Skipping Abort xri x%x\n",
+ "lpfc_nbuf x%px nvmeCmd x%px, "
+ "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
lpfc_nbuf, lpfc_nbuf->nvmeCmd,
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1815,7 +1878,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Don't abort IOs no longer on the pending queue. */
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6142 NVME IO req %p not queued - skipping "
+ "6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1830,8 +1893,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6144 Outstanding NVME I/O Abort Request "
- "still pending on nvme_fcreq %p, "
- "lpfc_ncmd %p xri x%x\n",
+ "still pending on nvme_fcreq x%px, "
+ "lpfc_ncmd %px xri x%x\n",
pnvme_fcreq, lpfc_nbuf,
nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1841,7 +1904,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (!abts_buf) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6136 No available abort wqes. Skipping "
- "Abts req for nvme_fcreq %p xri x%x\n",
+ "Abts req for nvme_fcreq x%px xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
}
@@ -1855,7 +1918,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* WQEs are reused. Clear stale data and set key fields to
* zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
*/
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+ memset(abts_wqe, 0, sizeof(*abts_wqe));
bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
/* word 7 */
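
The memset change just above swaps a spelled-out type for the pointed-to object: sizeof(*abts_wqe) stays tied to whatever abts_wqe points at, so the clear cannot silently become the wrong size if the WQE declaration is later changed. A tiny illustration with a hypothetical stand-in union; the real lpfc_wqe layouts differ:

    #include <stdio.h>
    #include <string.h>

    union wqe { unsigned int words[16]; };   /* 64 bytes, invented for the example */

    int main(void)
    {
        union wqe entry;
        union wqe *abts_wqe = &entry;

        /* sizeof(*abts_wqe) is resolved from the pointer's target type,
         * so this memset follows the type wherever it goes. */
        memset(abts_wqe, 0, sizeof(*abts_wqe));
        printf("cleared %zu bytes\n", sizeof(*abts_wqe));
        return 0;
    }
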
@@ -1892,7 +1955,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (ret_val) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6137 Failed abts issue_wqe with status x%x "
- "for nvme_fcreq %p.\n",
+ "for nvme_fcreq x%px.\n",
ret_val, pnvme_fcreq);
lpfc_sli_release_iocbq(phba, abts_buf);
return;
@@ -1982,7 +2045,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sgl->word2 = cpu_to_le32(sgl->word2);
/* Fill in word 3 / sgl_len during cmd submission */
- /* Initialize WQE */
+ /* Initialize 64 bytes only */
memset(wqe, 0, sizeof(union lpfc_wqe));
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
@@ -2028,11 +2091,11 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag);
- spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
list_add_tail(&lpfc_ncmd->list,
- &qp->lpfc_abts_nvme_buf_list);
+ &qp->lpfc_abts_io_buf_list);
qp->abts_nvme_io_bufs++;
- spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
@@ -2095,8 +2158,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
"6005 Successfully registered local "
- "NVME port num %d, localP %p, private %p, "
- "sg_seg %d\n",
+ "NVME port num %d, localP x%px, private "
+ "x%px, sg_seg %d\n",
localport->port_num, localport,
localport->private,
lpfc_nvme_template.max_sgl_segments);
@@ -2157,14 +2220,14 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (unlikely(!ret)) {
pending = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
if (pring->txcmplq_cnt)
pending += pring->txcmplq_cnt;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
- "6176 Lport %p Localport %p wait "
+ "6176 Lport x%px Localport x%px wait "
"timed out. Pending %d. Renewing.\n",
lport, vport->localport, pending);
continue;
@@ -2172,7 +2235,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
break;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
- "6177 Lport %p Localport %p Complete Success\n",
+ "6177 Lport x%px Localport x%px Complete Success\n",
lport, vport->localport);
}
#endif
@@ -2203,7 +2266,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6011 Destroying NVME localport %p\n",
+ "6011 Destroying NVME localport x%px\n",
localport);
/* lport's rport list is clear. Unregister
@@ -2253,12 +2316,12 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
if (!lport) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
- "6171 Update NVME fail. localP %p, No lport\n",
+ "6171 Update NVME fail. localP x%px, No lport\n",
localport);
return;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6012 Update NVME lport %p did x%x\n",
+ "6012 Update NVME lport x%px did x%x\n",
localport, vport->fc_myDID);
localport->port_id = vport->fc_myDID;
@@ -2268,7 +2331,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6030 bound lport %p to DID x%06x\n",
+ "6030 bound lport x%px to DID x%06x\n",
lport, localport->port_id);
#endif
}
@@ -2317,9 +2380,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irq(&vport->phba->hbalock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
- spin_unlock_irq(&vport->phba->hbalock);
- if (!oldrport)
+ if (oldrport) {
+ prev_ndlp = oldrport->ndlp;
+ spin_unlock_irq(&vport->phba->hbalock);
+ } else {
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_nlp_get(ndlp);
+ }
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
@@ -2338,25 +2405,34 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* New remoteport record does not guarantee valid
* host private memory area.
*/
- prev_ndlp = oldrport->ndlp;
if (oldrport == remote_port->private) {
/* Same remoteport - ndlp should match.
* Just reuse.
*/
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "remoteport %p wwpn 0x%llx, "
- "Data: x%x x%x %p %p x%x x%06x\n",
+ "6014 Rebind lport to current "
+ "remoteport x%px wwpn 0x%llx, "
+ "Data: x%x x%x x%px x%px x%x "
+ " x%06x\n",
remote_port,
remote_port->port_name,
remote_port->port_id,
remote_port->port_role,
- prev_ndlp,
+ oldrport->ndlp,
ndlp,
ndlp->nlp_type,
ndlp->nlp_DID);
- return 0;
+
+ /* It's a complete rebind only if the driver
+ * is registering with the same ndlp. Otherwise
+ * the driver likely executed a node swap
+ * prior to this registration and the ndlp to
+ * remoteport binding needs to be redone.
+ */
+ if (prev_ndlp == ndlp)
+ return 0;
+
}
/* Sever the ndlp<->rport association
@@ -2390,10 +2466,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
- "6022 Binding new rport to "
- "lport %p Remoteport %p rport %p WWNN 0x%llx, "
+ "6022 Bind lport x%px to remoteport x%px "
+ "rport x%px WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
+ "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
lport, remote_port, rport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role,
@@ -2423,20 +2499,23 @@ void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
- struct lpfc_nvme_rport *rport;
- struct nvme_fc_remote_port *remoteport;
+ struct lpfc_nvme_rport *nrport;
+ struct nvme_fc_remote_port *remoteport = NULL;
- rport = ndlp->nrport;
+ spin_lock_irq(&vport->phba->hbalock);
+ nrport = lpfc_ndlp_get_nrport(ndlp);
+ if (nrport)
+ remoteport = nrport->remoteport;
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6170 Rescan NPort DID x%06x type x%x "
- "state x%x rport %p\n",
- ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, rport);
- if (!rport)
- goto input_err;
- remoteport = rport->remoteport;
- if (!remoteport)
- goto input_err;
+ "state x%x nrport x%px remoteport x%px\n",
+ ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
+ nrport, remoteport);
+
+ if (!nrport || !remoteport)
+ goto rescan_exit;
/* Only rescan if we are an NVME target in the MAPPED state */
if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
@@ -2449,10 +2528,10 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, remoteport->port_state);
}
return;
-input_err:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6169 State error: lport %p, rport%p FCID x%06x\n",
- vport->localport, ndlp->rport, ndlp->nlp_DID);
+ rescan_exit:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6169 Skip NVME Rport Rescan, NVME remoteport "
+ "unregistered\n");
#endif
}
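
lpfc_nvme_rescan_port() used to chase ndlp->nrport->remoteport with no lock held; the rework snapshots both pointers inside hbalock and treats a NULL result as "remoteport already unregistered, skip the rescan" instead of a state error. A userspace sketch of the snapshot-under-lock pattern, with a pthread mutex standing in for the HBA lock and invented structure names:

    #include <stdio.h>
    #include <pthread.h>

    struct remoteport { int port_state; };
    struct nrport { struct remoteport *remoteport; };
    struct node { struct nrport *nrport; };

    static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

    static struct remoteport *snapshot(struct node *ndlp)
    {
        struct remoteport *rp = NULL;

        pthread_mutex_lock(&hbalock);    /* both dereferences under the lock */
        if (ndlp->nrport)
            rp = ndlp->nrport->remoteport;
        pthread_mutex_unlock(&hbalock);
        return rp;                       /* NULL means: skip the rescan */
    }

    int main(void)
    {
        struct remoteport rp = { 0 };
        struct nrport nr = { &rp };
        struct node n = { &nr };

        printf("remoteport %s\n", snapshot(&n) ? "valid" : "gone");
        return 0;
    }
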
@@ -2499,7 +2578,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
goto input_err;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6033 Unreg nvme remoteport %p, portname x%llx, "
+ "6033 Unreg nvme remoteport x%px, portname x%llx, "
"port_id x%06x, portstate x%x port type x%x\n",
remoteport, remoteport->port_name,
remoteport->port_id, remoteport->port_state,
@@ -2537,7 +2616,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
input_err:
#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6168 State error: lport %p, rport%p FCID x%06x\n",
+ "6168 State error: lport x%px, rport x%px FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}
@@ -2545,6 +2624,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
+ * @lpfc_ncmd: The nvme job structure for the request being aborted.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* NVME aborted xri. Aborted NVME IO commands are completed to the transport
@@ -2552,59 +2632,33 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx)
+ struct sli4_wcqe_xri_aborted *axri,
+ struct lpfc_io_buf *lpfc_ncmd)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
struct nvmefc_fcp_req *nvme_cmd = NULL;
- struct lpfc_nodelist *ndlp;
- struct lpfc_sli4_hdw_queue *qp;
- unsigned long iflag = 0;
+ struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return;
- qp = &phba->sli4_hba.hdwq[idx];
- spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&qp->abts_nvme_buf_list_lock);
- list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
- &qp->lpfc_abts_nvme_buf_list, list) {
- if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
- list_del_init(&lpfc_ncmd->list);
- qp->abts_nvme_io_bufs--;
- lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
- lpfc_ncmd->status = IOSTAT_SUCCESS;
- spin_unlock(&qp->abts_nvme_buf_list_lock);
-
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- ndlp = lpfc_ncmd->ndlp;
- if (ndlp)
- lpfc_sli4_abts_err_handler(phba, ndlp, axri);
-
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6311 nvme_cmd %p xri x%x tag x%x "
- "abort complete and xri released\n",
- lpfc_ncmd->nvmeCmd, xri,
- lpfc_ncmd->cur_iocbq.iotag);
-
- /* Aborted NVME commands are required to not complete
- * before the abort exchange command fully completes.
- * Once completed, it is available via the put list.
- */
- if (lpfc_ncmd->nvmeCmd) {
- nvme_cmd = lpfc_ncmd->nvmeCmd;
- nvme_cmd->done(nvme_cmd);
- lpfc_ncmd->nvmeCmd = NULL;
- }
- lpfc_release_nvme_buf(phba, lpfc_ncmd);
- return;
- }
- }
- spin_unlock(&qp->abts_nvme_buf_list_lock);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6312 XRI Aborted xri x%x not found\n", xri);
+ if (ndlp)
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
+ "xri released\n",
+ lpfc_ncmd->nvmeCmd, xri,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands are required to not complete
+ * before the abort exchange command fully completes.
+ * Once completed, it is available via the put list.
+ */
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
/**
@@ -2626,13 +2680,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return;
- /* Cycle through all NVME rings and make sure all outstanding
+ /* Cycle through all IO rings and make sure all outstanding
* WQEs have been removed from the txcmplqs.
*/
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- if (!phba->sli4_hba.hdwq[i].nvme_wq)
+ if (!phba->sli4_hba.hdwq[i].io_wq)
continue;
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
@@ -2653,3 +2707,50 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
}
}
}
+
+void
+lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ struct lpfc_io_buf *lpfc_ncmd;
+ struct nvmefc_fcp_req *nCmd;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
+
+ if (!pwqeIn->context1) {
+ lpfc_sli_release_iocbq(phba, pwqeIn);
+ return;
+ }
+ /* For abort iocb just return, IO iocb will do a done call */
+ if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
+ CMD_ABORT_XRI_CX) {
+ lpfc_sli_release_iocbq(phba, pwqeIn);
+ return;
+ }
+ lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
+
+ spin_lock(&lpfc_ncmd->buf_lock);
+ if (!lpfc_ncmd->nvmeCmd) {
+ spin_unlock(&lpfc_ncmd->buf_lock);
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ return;
+ }
+
+ nCmd = lpfc_ncmd->nvmeCmd;
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6194 NVME Cancel xri %x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag);
+
+ nCmd->transferred_length = 0;
+ nCmd->rcv_rsplen = 0;
+ nCmd->status = NVME_SC_INTERNAL;
+ freqpriv = nCmd->private;
+ freqpriv->nvme_buf = NULL;
+ lpfc_ncmd->nvmeCmd = NULL;
+
+ spin_unlock(&lpfc_ncmd->buf_lock);
+ nCmd->done(nCmd);
+
+ /* Call release with XB=1 to queue the IO into the abort list. */
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+#endif
+}
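
lpfc_nvme_cancel_iocb() is new: when the SLI layer cancels queued WQEs it can no longer simply free an NVMe iocb, because the NVMe transport still owns the request and expects its done() callback. The cancel path therefore completes the request (zero lengths, NVME_SC_INTERNAL) under the buffer lock, severs the buffer/request association, and only then calls done() and releases the buffer. A userspace sketch of that ordering, all types invented:

    #include <stdio.h>
    #include <stddef.h>

    struct nvme_req {
        int status;
        size_t transferred;
        void (*done)(struct nvme_req *);
    };

    struct io_buf {
        struct nvme_req *cmd;      /* NULL once completed elsewhere */
    };

    static void req_done(struct nvme_req *r)
    {
        printf("completed: status %d, %zu bytes\n", r->status, r->transferred);
    }

    static void cancel(struct io_buf *buf)
    {
        struct nvme_req *r = buf->cmd;

        if (!r)                    /* already finished: just release */
            return;
        buf->cmd = NULL;           /* break the association first */
        r->status = -1;            /* stand-in for NVME_SC_INTERNAL */
        r->transferred = 0;
        r->done(r);                /* hand the request back to the transport */
    }

    int main(void)
    {
        struct nvme_req r = { 0, 4096, req_done };
        struct io_buf b = { &r };

        cancel(&b);
        return 0;
    }
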
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index faa596f9e861..9884228800a5 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1026,7 +1026,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
- wq = ctxp->hdwq->nvme_wq;
+ wq = ctxp->hdwq->io_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1104,7 +1104,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
- wq = ctxp->hdwq->nvme_wq;
+ wq = ctxp->hdwq->io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
}
@@ -1437,7 +1437,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
infop = lpfc_get_ctx_list(phba, i, j);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
"6408 TOTAL NVMET ctx for CPU %d "
- "MRQ %d: cnt %d nextcpu %p\n",
+ "MRQ %d: cnt %d nextcpu x%px\n",
i, j, infop->nvmet_ctx_list_cnt,
infop->nvmet_ctx_next_cpu);
}
@@ -1500,7 +1500,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6026 Registered NVME "
- "targetport: %p, private %p "
+ "targetport: x%px, private x%px "
"portnm %llx nodenm %llx segs %d qs %d\n",
phba->targetport, tgtp,
pinfo.port_name, pinfo.node_name,
@@ -1555,7 +1555,7 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
return 0;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6007 Update NVMET port %p did x%x\n",
+ "6007 Update NVMET port x%px did x%x\n",
phba->targetport, vport->fc_myDID);
phba->targetport->port_id = vport->fc_myDID;
@@ -1790,12 +1790,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_nvmet_defer_release(phba, ctxp);
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
- if (ctxp->state == LPFC_NVMET_STE_RCV)
- lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
- else
- lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
@@ -1922,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
- wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
+ wq = phba->sli4_hba.hdwq[qidx].io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
@@ -1930,7 +1926,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
- "6179 Unreg targetport %p timeout "
+ "6179 Unreg targetport x%px timeout "
"reached.\n", phba->targetport);
lpfc_nvmet_cleanup_io_context(phba);
}
@@ -3113,7 +3109,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_abort_cmpl);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
+ "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
@@ -3299,7 +3295,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
@@ -3334,7 +3330,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* WQEs are reused. Clear stale data and set key fields to
* zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
*/
- memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+ memset(abts_wqe, 0, sizeof(*abts_wqe));
/* word 3 */
bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index f9df800e7067..fe1097666de4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -53,8 +53,6 @@
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2
-int _dump_buf_done = 1;
-
static char *dif_op_str[] = {
"PROT_NORMAL",
"PROT_READ_INSERT",
@@ -89,63 +87,6 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static void
-lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
-{
- void *src, *dst;
- struct scatterlist *sgde = scsi_sglist(cmnd);
-
- if (!_dump_buf_data) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
- __func__);
- return;
- }
-
-
- if (!sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9051 BLKGRD: ERROR: data scatterlist is null\n");
- return;
- }
-
- dst = (void *) _dump_buf_data;
- while (sgde) {
- src = sg_virt(sgde);
- memcpy(dst, src, sgde->length);
- dst += sgde->length;
- sgde = sg_next(sgde);
- }
-}
-
-static void
-lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
-{
- void *src, *dst;
- struct scatterlist *sgde = scsi_prot_sglist(cmnd);
-
- if (!_dump_buf_dif) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
- __func__);
- return;
- }
-
- if (!sgde) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9053 BLKGRD: ERROR: prot scatterlist is null\n");
- return;
- }
-
- dst = _dump_buf_dif;
- while (sgde) {
- src = sg_virt(sgde);
- memcpy(dst, src, sgde->length);
- dst += sgde->length;
- sgde = sg_next(sgde);
- }
-}
-
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
@@ -537,29 +478,32 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
qp = &phba->sli4_hba.hdwq[idx];
- spin_lock(&qp->abts_scsi_buf_list_lock);
+ spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
- &qp->lpfc_abts_scsi_buf_list, list) {
+ &qp->lpfc_abts_io_buf_list, list) {
+ if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
+ continue;
+
if (psb->rdata && psb->rdata->pnode &&
psb->rdata->pnode->vport == vport)
psb->rdata = NULL;
}
- spin_unlock(&qp->abts_scsi_buf_list_lock);
+ spin_unlock(&qp->abts_io_buf_list_lock);
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
- * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * FCP or NVME aborted xri.
**/
void
-lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx)
+lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri, int idx)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
@@ -577,16 +521,25 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&qp->abts_scsi_buf_list_lock);
+ spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
- &qp->lpfc_abts_scsi_buf_list, list) {
+ &qp->lpfc_abts_io_buf_list, list) {
if (psb->cur_iocbq.sli4_xritag == xri) {
- list_del(&psb->list);
- qp->abts_scsi_io_bufs--;
+ list_del_init(&psb->list);
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
- spin_unlock(
- &qp->abts_scsi_buf_list_lock);
+#ifdef BUILD_NVME
+ if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
+ qp->abts_nvme_io_bufs--;
+ spin_unlock(&qp->abts_io_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
+ return;
+ }
+#endif
+ qp->abts_scsi_io_bufs--;
+ spin_unlock(&qp->abts_io_buf_list_lock);
+
if (psb->rdata && psb->rdata->pnode)
ndlp = psb->rdata->pnode;
else
@@ -605,12 +558,12 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
return;
}
}
- spin_unlock(&qp->abts_scsi_buf_list_lock);
+ spin_unlock(&qp->abts_io_buf_list_lock);
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+ (iocbq->iocb_flag & LPFC_IO_LIBDFC))
continue;
if (iocbq->sli4_xritag != xri)
continue;
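
With the per-protocol abort lists merged into lpfc_abts_io_buf_list, one walk serves both stacks: the matching buffer is unhooked, the right counter is decremented, and the NVMe flag bit on the iocb selects which completion handler finishes the job. Note that the flag field carries several bits, which is why the test wants a bitwise AND rather than equality. A sketch of the handoff, with invented names:

    #include <stdio.h>

    #define IO_NVME 0x1   /* invented flag bit for the example */

    struct io_buf { unsigned flags; int xri; };

    static void nvme_xri_aborted(struct io_buf *b) { printf("NVMe path, xri %d\n", b->xri); }
    static void scsi_xri_aborted(struct io_buf *b) { printf("SCSI path, xri %d\n", b->xri); }

    /* One list, one search; the protocol is decided per buffer. */
    static void io_xri_aborted(struct io_buf *bufs, int n, int xri)
    {
        for (int i = 0; i < n; i++) {
            if (bufs[i].xri != xri)
                continue;
            if (bufs[i].flags & IO_NVME)   /* bitwise test, not '==' */
                nvme_xri_aborted(&bufs[i]);
            else
                scsi_xri_aborted(&bufs[i]);
            return;
        }
    }

    int main(void)
    {
        struct io_buf bufs[] = { { 0, 10 }, { IO_NVME, 11 } };

        io_xri_aborted(bufs, 2, 11);
        io_xri_aborted(bufs, 2, 10);
        return 0;
    }
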
@@ -685,8 +638,9 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
- uint32_t sgl_size, cpu, idx;
+ uint32_t cpu, idx;
int tag;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
cpu = raw_smp_processor_id();
if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
@@ -704,9 +658,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
return NULL;
}
- sgl_size = phba->cfg_sg_dma_buf_size -
- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
-
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
@@ -721,9 +672,12 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
lpfc_cmd->prot_data_type = 0;
#endif
- lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
- lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
- sizeof(struct fcp_cmnd));
+ tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
+ if (!tmp)
+ return NULL;
+
+ lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
+ lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
/*
* The first two SGEs are the FCP_CMD and FCP_RSP.
@@ -731,7 +685,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* first two and leave the rest for queuecommand.
*/
sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size);
+ pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
sgl->word2 = le32_to_cpu(sgl->word2);
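
The FCP_CMD/FCP_RSP pair no longer sits at a fixed offset inside the SGL buffer, since that buffer must now be free to grow into chained pages; instead lpfc_get_cmd_rsp_buf_per_hdwq() hands out a DMA-able pair from a per-hardware-queue pool, and buffer setup fails cleanly when the pool cannot supply one. A sketch of such a pool lookup, all names hypothetical:

    #include <stdio.h>

    struct cmd_rsp_buf {
        void *fcp_cmnd;
        void *fcp_rsp;
        unsigned long long dma_handle;   /* stand-in for fcp_cmd_rsp_dma_handle */
        struct cmd_rsp_buf *next;
    };

    struct hdwq { struct cmd_rsp_buf *free_list; };

    static struct cmd_rsp_buf *get_cmd_rsp_buf(struct hdwq *q)
    {
        struct cmd_rsp_buf *b = q->free_list;

        if (b)
            q->free_list = b->next;      /* pop; the driver allocates on a miss */
        return b;                        /* NULL: caller fails the prep */
    }

    int main(void)
    {
        struct cmd_rsp_buf b = { 0, 0, 0x1000, 0 };
        struct hdwq q = { &b };

        printf("%s\n", get_cmd_rsp_buf(&q) ? "got cmd/rsp pair" : "pool empty");
        printf("%s\n", get_cmd_rsp_buf(&q) ? "got cmd/rsp pair" : "pool empty");
        return 0;
    }
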
@@ -835,11 +789,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
qp = psb->hdwq;
if (psb->exch_busy) {
- spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
psb->pCmd = NULL;
- list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list);
+ list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
qp->abts_scsi_io_bufs++;
- spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+ spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else {
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
}
@@ -918,9 +872,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
- return 1;
+ return 2;
}
/*
@@ -1774,7 +1729,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (!sgpe || !sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ "9020 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
}
@@ -1989,7 +1944,8 @@ out:
**/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
- struct sli4_sge *sgl, int datasegcnt)
+ struct sli4_sge *sgl, int datasegcnt,
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct sli4_sge_diseed *diseed = NULL;
@@ -2003,6 +1959,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t checking = 1;
uint32_t dma_len;
uint32_t dma_offset = 0;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
+ int j;
+ bool lsp_just_set = false;
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
if (status)
@@ -2062,23 +2021,64 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgl++;
/* assumption: caller has already run dma_map_sg on command data */
- scsi_for_each_sg(sc, sgde, datasegcnt, i) {
- physaddr = sg_dma_address(sgde);
- dma_len = sg_dma_len(sgde);
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- if ((i + 1) == datasegcnt)
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ sgde = scsi_sglist(sc);
+ j = 3;
+ for (i = 0; i < datasegcnt; i++) {
+ /* clear it */
+ sgl->word2 = 0;
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
+ /* do we need to expand the segment */
+ if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
+ ((datasegcnt - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_cmd->seg_cnt = 0;
+ return 0;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
+ if ((datasegcnt - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ physaddr = sg_dma_address(sgde);
+ dma_len = sg_dma_len(sgde);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ sgde = sg_next(sgde);
+
+ sgl++;
+ num_sge++;
+ lsp_just_set = false;
+
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
- sgl++;
- num_sge++;
}
out:
@@ -2124,7 +2124,8 @@ out:
**/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
- struct sli4_sge *sgl, int datacnt, int protcnt)
+ struct sli4_sge *sgl, int datacnt, int protcnt,
+ struct lpfc_io_buf *lpfc_cmd)
{
struct scatterlist *sgde = NULL; /* s/g data entry */
struct scatterlist *sgpe = NULL; /* s/g prot entry */
@@ -2146,14 +2147,15 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
#endif
uint32_t checking = 1;
uint32_t dma_offset = 0;
- int num_sge = 0;
+ int num_sge = 0, j = 2;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
sgpe = scsi_prot_sglist(sc);
sgde = scsi_sglist(sc);
if (!sgpe || !sgde) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+ "9082 Invalid s/g entry: data=x%px prot=x%px\n",
sgpe, sgde);
return 0;
}
@@ -2179,9 +2181,37 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
split_offset = 0;
do {
/* Check to see if we ran out of space */
- if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+ if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
+ !(phba->cfg_xpsgl))
return num_sge + 3;
+ /* DISEED and DIF have to be together */
+ if (!((j + 1) % phba->border_sge_num) ||
+ !((j + 2) % phba->border_sge_num) ||
+ !((j + 3) % phba->border_sge_num)) {
+ sgl->word2 = 0;
+
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ goto out;
+ } else {
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+ }
+
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ j = 0;
+ }
+
/* setup DISEED with what we have */
diseed = (struct sli4_sge_diseed *) sgl;
memset(diseed, 0, sizeof(struct sli4_sge_diseed));
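
For protection IO the chaining test is stricter than in the plain data path: a protection group opens with a DISEED SGE immediately followed by its DIF SGE, and that pair (plus the first data SGE) must not straddle an SGL page boundary. The code therefore chains early whenever any of the next three slots would land on the page's final slot. A small sketch of that test, again with a hypothetical BORDER_SGE_NUM:

    #include <stdio.h>
    #include <stdbool.h>

    #define BORDER_SGE_NUM 8   /* assumed slots per SGL page */

    /* Chain now if DISEED, DIF, or the first data SGE would hit the
     * page's last slot, which is reserved for the LSP link. */
    static bool must_chain(int j)
    {
        return !((j + 1) % BORDER_SGE_NUM) ||
               !((j + 2) % BORDER_SGE_NUM) ||
               !((j + 3) % BORDER_SGE_NUM);
    }

    int main(void)
    {
        for (int j = 0; j < 16; j++)
            printf("slot %2d: %s\n", j,
                   must_chain(j) ? "LSP, start new page" : "DISEED fits here");
        return 0;
    }

With eight slots per page this chains at slots 5, 6 and 7, so the three-SGE group always starts with enough room left on its page.
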
@@ -2228,7 +2258,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* advance sgl and increment bde count */
num_sge++;
+
sgl++;
+ j++;
/* setup the first BDE that points to protection buffer */
protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
@@ -2243,6 +2275,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = 0;
protgrp_blks = protgroup_len / 8;
protgrp_bytes = protgrp_blks * blksize;
@@ -2263,9 +2296,14 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* setup SGE's for data blocks associated with DIF data */
pgdone = 0;
subtotal = 0; /* total bytes processed for current prot grp */
+
+ sgl++;
+ j++;
+
while (!pgdone) {
/* Check to see if we ran out of space */
- if (num_sge >= phba->cfg_total_seg_cnt)
+ if ((num_sge >= phba->cfg_total_seg_cnt) &&
+ !phba->cfg_xpsgl)
return num_sge + 1;
if (!sgde) {
@@ -2274,60 +2312,101 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
__func__);
return 0;
}
- sgl++;
- dataphysaddr = sg_dma_address(sgde) + split_offset;
- remainder = sg_dma_len(sgde) - split_offset;
+ if (!((j + 1) % phba->border_sge_num)) {
+ sgl->word2 = 0;
- if ((subtotal + remainder) <= protgrp_bytes) {
- /* we can use this whole buffer */
- dma_len = remainder;
- split_offset = 0;
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
- if ((subtotal + remainder) == protgrp_bytes)
- pgdone = 1;
+ sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
+ lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ goto out;
+ } else {
+ sgl->addr_lo = cpu_to_le32(
+ putPaddrLow(sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(
+ putPaddrHigh(sgl_xtra->dma_phys_sgl));
+ }
+
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
} else {
- /* must split this buffer with next prot grp */
- dma_len = protgrp_bytes - subtotal;
- split_offset += dma_len;
- }
+ dataphysaddr = sg_dma_address(sgde) +
+ split_offset;
- subtotal += dma_len;
+ remainder = sg_dma_len(sgde) - split_offset;
- sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ if ((subtotal + remainder) <= protgrp_bytes) {
+ /* we can use this whole buffer */
+ dma_len = remainder;
+ split_offset = 0;
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
+ if ((subtotal + remainder) ==
+ protgrp_bytes)
+ pgdone = 1;
+ } else {
+ /* must split this buffer with next
+ * prot grp
+ */
+ dma_len = protgrp_bytes - subtotal;
+ split_offset += dma_len;
+ }
- num_sge++;
- curr_data++;
+ subtotal += dma_len;
- if (split_offset)
- break;
+ sgl->word2 = 0;
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ dataphysaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ dataphysaddr));
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
- /* Move to the next s/g segment if possible */
- sgde = sg_next(sgde);
+ sgl->sge_len = cpu_to_le32(dma_len);
+ dma_offset += dma_len;
+
+ num_sge++;
+ curr_data++;
+
+ if (split_offset) {
+ sgl++;
+ j++;
+ break;
+ }
+
+ /* Move to the next s/g segment if possible */
+ sgde = sg_next(sgde);
+
+ sgl++;
+ }
+
+ j++;
}
if (protgroup_offset) {
/* update the reference tag */
reftag += protgrp_blks;
- sgl++;
continue;
}
/* are we done ? */
if (curr_prot == protcnt) {
+ /* mark the last SGL */
+ sgl--;
bf_set(lpfc_sli4_sge_last, sgl, 1);
alldone = 1;
} else if (curr_prot < protcnt) {
/* advance to next prot buffer */
sgpe = sg_next(sgpe);
- sgl++;
/* update the reference tag */
reftag += protgrp_blks;
@@ -2430,7 +2509,10 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
*
* This is the protection/DIF aware version of
* lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
- * two functions eventually, but for now, it's here
+ * two functions eventually, but for now, it's here.
+ * RETURNS 0 - SUCCESS,
+ * 1 - Failed DMA map, retry.
+ * 2 - Invalid scsi cmd or prot-type. Do not retry.
**/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
@@ -2444,6 +2526,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ int ret = 1;
struct lpfc_vport *vport = phba->pport;
/*
@@ -2467,8 +2550,11 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
lpfc_cmd->seg_cnt = datasegcnt;
/* First check if data segment count from SCSI Layer is good */
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
+ ret = 2;
goto err;
+ }
prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
@@ -2476,14 +2562,18 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
case LPFC_PG_TYPE_NO_DIF:
/* Here we need to add a PDE5 and PDE6 to the count */
- if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+ if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
+ ret = 2;
goto err;
+ }
num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
datasegcnt);
/* we should have 2 or more entries in buffer list */
- if (num_bde < 2)
+ if (num_bde < 2) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_DIF_BUF:
@@ -2507,15 +2597,19 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
* protection data segment.
*/
if ((lpfc_cmd->prot_seg_cnt * 4) >
- (phba->cfg_total_seg_cnt - 2))
+ (phba->cfg_total_seg_cnt - 2)) {
+ ret = 2;
goto err;
+ }
num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
datasegcnt, protsegcnt);
/* we should have 3 or more entries in buffer list */
if ((num_bde < 3) ||
- (num_bde > phba->cfg_total_seg_cnt))
+ (num_bde > phba->cfg_total_seg_cnt)) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_INVALID:
@@ -2526,7 +2620,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"9022 Unexpected protection group %i\n",
prot_group_type);
- return 1;
+ return 2;
}
}
@@ -2576,7 +2670,7 @@ err:
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->prot_seg_cnt = 0;
- return 1;
+ return ret;
}
/*
@@ -2809,26 +2903,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
uint32_t bgstat = bgf->bgstat;
uint64_t failing_sector = 0;
- spin_lock(&_dump_buf_lock);
- if (!_dump_buf_done) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
- " Data for %u blocks to debugfs\n",
- (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
- lpfc_debug_save_data(phba, cmd);
-
- /* If we have a prot sgl, save the DIF buffer */
- if (lpfc_prot_group_type(phba, cmd) ==
- LPFC_PG_TYPE_DIF_BUF) {
- lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
- "Saving DIF for %u blocks to debugfs\n",
- (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
- lpfc_debug_save_dif(phba, cmd);
- }
-
- _dump_buf_done = 1;
- }
- spin_unlock(&_dump_buf_lock);
-
if (lpfc_bgs_get_invalid_prof(bgstat)) {
cmd->result = DID_ERROR << 16;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
@@ -2962,7 +3036,8 @@ out:
* field of @lpfc_cmd for device with SLI-4 interface spec.
*
* Return codes:
- * 1 - Error
+ * 2 - Error - Do not retry
+ * 1 - Error - Retry
* 0 - Success
**/
static int
@@ -2978,8 +3053,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
- int nseg;
+ int nseg, i, j;
struct ulp_bde64 *bde;
+ bool lsp_just_set = false;
+ struct sli4_hybrid_sgl *sgl_xtra = NULL;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -3006,15 +3083,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
sgl += 1;
first_data_sgl = sgl;
lpfc_cmd->seg_cnt = nseg;
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ if (!phba->cfg_xpsgl &&
+ lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
" %s: Too many sg segments from "
"dma_map_sg. Config %d, seg_cnt %d\n",
__func__, phba->cfg_sg_seg_cnt,
lpfc_cmd->seg_cnt);
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
lpfc_cmd->seg_cnt = 0;
scsi_dma_unmap(scsi_cmnd);
- return 1;
+ return 2;
}
/*
@@ -3026,22 +3105,80 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* the IOCB. If it can't then the BDEs get added to a BPL as it
* does for SLI-2 mode.
*/
- scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
- physaddr = sg_dma_address(sgel);
- dma_len = sg_dma_len(sgel);
- sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
- sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((num_bde + 1) == nseg)
+
+ /* for tracking segment boundaries */
+ sgel = scsi_sglist(scsi_cmnd);
+ j = 2;
+ for (i = 0; i < nseg; i++) {
+ sgl->word2 = 0;
+ if ((num_bde + 1) == nseg) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ } else {
bf_set(lpfc_sli4_sge_last, sgl, 0);
- bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
- bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len = cpu_to_le32(dma_len);
- dma_offset += dma_len;
- sgl++;
+
+ /* do we need to expand the segment */
+ if (!lsp_just_set &&
+ !((j + 1) % phba->border_sge_num) &&
+ ((nseg - 1) != i)) {
+ /* set LSP type */
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_LSP);
+
+ sgl_xtra = lpfc_get_sgl_per_hdwq(
+ phba, lpfc_cmd);
+
+ if (unlikely(!sgl_xtra)) {
+ lpfc_cmd->seg_cnt = 0;
+ scsi_dma_unmap(scsi_cmnd);
+ return 1;
+ }
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ sgl_xtra->dma_phys_sgl));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ sgl_xtra->dma_phys_sgl));
+
+ } else {
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ }
+ }
+
+ if (!(bf_get(lpfc_sli4_sge_type, sgl) &
+ LPFC_SGE_TYPE_LSP)) {
+ if ((nseg - 1) == i)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+
+ physaddr = sg_dma_address(sgel);
+ dma_len = sg_dma_len(sgel);
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(
+ physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(
+ physaddr));
+
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ sgel = sg_next(sgel);
+
+ sgl++;
+ lsp_just_set = false;
+
+ } else {
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(
+ phba->cfg_sg_dma_buf_size);
+
+ sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
+ i = i - 1;
+
+ lsp_just_set = true;
+ }
+
+ j++;
}
/*
* Setup the first Payload BDE. For FCoE we just key off
@@ -3110,6 +3247,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
* This is the protection/DIF aware version of
* lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
* two functions eventually, but for now, it's here
+ * Return codes:
+ * 2 - Error - Do not retry
+ * 1 - Error - Retry
+ * 0 - Success
**/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
@@ -3123,6 +3264,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
int prot_group_type = 0;
int fcpdl;
+ int ret = 1;
struct lpfc_vport *vport = phba->pport;
/*
@@ -3152,23 +3294,33 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
lpfc_cmd->seg_cnt = datasegcnt;
/* First check if data segment count from SCSI Layer is good */
- if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+ if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
+ !phba->cfg_xpsgl) {
+ WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
+ ret = 2;
goto err;
+ }
prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
switch (prot_group_type) {
case LPFC_PG_TYPE_NO_DIF:
/* Here we need to add a DISEED to the count */
- if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+ if (((lpfc_cmd->seg_cnt + 1) >
+ phba->cfg_total_seg_cnt) &&
+ !phba->cfg_xpsgl) {
+ ret = 2;
goto err;
+ }
num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
- datasegcnt);
+ datasegcnt, lpfc_cmd);
/* we should have 2 or more entries in buffer list */
- if (num_sge < 2)
+ if (num_sge < 2) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_DIF_BUF:
@@ -3190,17 +3342,23 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
* There is a minimum of 3 SGEs used for every
* protection data segment.
*/
- if ((lpfc_cmd->prot_seg_cnt * 3) >
- (phba->cfg_total_seg_cnt - 2))
+ if (((lpfc_cmd->prot_seg_cnt * 3) >
+ (phba->cfg_total_seg_cnt - 2)) &&
+ !phba->cfg_xpsgl) {
+ ret = 2;
goto err;
+ }
num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
- datasegcnt, protsegcnt);
+ datasegcnt, protsegcnt, lpfc_cmd);
/* we should have 3 or more entries in buffer list */
- if ((num_sge < 3) ||
- (num_sge > phba->cfg_total_seg_cnt))
+ if (num_sge < 3 ||
+ (num_sge > phba->cfg_total_seg_cnt &&
+ !phba->cfg_xpsgl)) {
+ ret = 2;
goto err;
+ }
break;
case LPFC_PG_TYPE_INVALID:
@@ -3211,7 +3369,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"9083 Unexpected protection group %i\n",
prot_group_type);
- return 1;
+ return 2;
}
}
@@ -3273,7 +3431,7 @@ err:
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->prot_seg_cnt = 0;
- return 1;
+ return ret;
}
/**
@@ -3839,7 +3997,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0710 Iodone <%d/%llu> cmd %p, error "
+ "0710 Iodone <%d/%llu> cmd x%px, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3), cmd->retries,
@@ -4454,8 +4612,12 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
- if (err)
+ if (err == 2) {
+ cmnd->result = DID_ERROR << 16;
+ goto out_fail_command_release_buf;
+ } else if (err) {
goto out_host_busy_free_buf;
+ }
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
@@ -4526,6 +4688,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
out_tgt_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
+ out_fail_command_release_buf:
+ lpfc_release_scsi_buf(phba, lpfc_cmd);
+
out_fail_command:
cmnd->scsi_done(cmnd);
return 0;
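
The queuecommand hunk above wires in the new three-way contract from the DMA-prep routines: 2 means the command itself is unusable (complete it back with DID_ERROR and release the buffer), 1 means a transient resource shortage (report host-busy so the midlayer retries), 0 proceeds to issue. A hedged sketch of that dispatch, names invented:

    #include <stdio.h>

    enum prep_status { PREP_OK = 0, PREP_RETRY = 1, PREP_FATAL = 2 };

    static const char *dispatch(enum prep_status err)
    {
        if (err == PREP_FATAL)
            return "DID_ERROR, release buffer, done(cmnd)";  /* no retry */
        if (err == PREP_RETRY)
            return "SCSI_MLQUEUE_HOST_BUSY";                 /* midlayer retries */
        return "issue the IOCB";
    }

    int main(void)
    {
        for (int e = PREP_OK; e <= PREP_FATAL; e++)
            printf("prep returned %d -> %s\n", e, dispatch(e));
        return 0;
    }
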
@@ -4568,7 +4733,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
@@ -4589,7 +4754,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
iocb = &lpfc_cmd->cur_iocbq;
if (phba->sli_rev == LPFC_SLI_REV4) {
- pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
+ pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
goto out_unlock_buf;
@@ -4956,7 +5121,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0797 Tgt Map rport failure: rdata x%p\n", rdata);
+ "0797 Tgt Map rport failure: rdata x%px\n", rdata);
return FAILED;
}
pnode = rdata->pnode;
@@ -5054,7 +5219,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0798 Device Reset rdata failure: rdata x%p\n",
+ "0798 Device Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
}
@@ -5066,7 +5231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0721 Device Reset rport failure: rdata x%p\n", rdata);
+ "0721 Device Reset rport failure: rdata x%px\n", rdata);
return FAILED;
}
@@ -5125,7 +5290,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0799 Target Reset rdata failure: rdata x%p\n",
+ "0799 Target Reset rdata failure: rdata x%px\n",
rdata);
return FAILED;
}
@@ -5137,7 +5302,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_chk_tgt_mapped(vport, cmnd);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0722 Target Reset rport failure: rdata x%p\n", rdata);
+ "0722 Target Reset rport failure: rdata x%px\n", rdata);
if (pnode) {
spin_lock_irq(shost->host_lock);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
@@ -5295,18 +5460,20 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
lpfc_offline(phba);
rc = lpfc_sli_brdrestart(phba);
if (rc)
- ret = FAILED;
+ goto error;
+
rc = lpfc_online(phba);
if (rc)
- ret = FAILED;
+ goto error;
+
lpfc_unblock_mgmt_io(phba);
- if (ret == FAILED) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "3323 Failed host reset, bring it offline\n");
- lpfc_sli4_offline_eratt(phba);
- }
return ret;
+error:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "3323 Failed host reset\n");
+ lpfc_unblock_mgmt_io(phba);
+ return FAILED;
}
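
The reset handler now uses one goto target for both failure points: brdrestart and online errors jump straight out, log a single message, still unblock management IO, and return FAILED, instead of falling through and additionally taking the port offline via lpfc_sli4_offline_eratt(). A compact sketch of the reshaped control flow:

    #include <stdio.h>

    static int brdrestart(void) { return 0; }   /* 0 = success, for the demo */
    static int online(void)     { return 0; }

    static int host_reset(void)
    {
        if (brdrestart())
            goto error;
        if (online())
            goto error;
        printf("unblock mgmt io\n");
        return 0;                        /* SUCCESS */
    error:
        printf("3323 Failed host reset\n");
        printf("unblock mgmt io\n");     /* unblocked on both paths */
        return -1;                       /* FAILED */
    }

    int main(void)
    {
        return host_reset() ? 1 : 0;
    }
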
/**
@@ -5870,7 +6037,7 @@ struct scsi_host_template lpfc_template_no_hr = {
.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
.cmd_per_lun = LPFC_CMD_PER_LUN,
.shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = 0xFFFFFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f9e6a135d656..a0c6945b8139 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1391,9 +1391,12 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
- if (!piocb->iocb_cmpl)
- lpfc_sli_release_iocbq(phba, piocb);
- else {
+ if (!piocb->iocb_cmpl) {
+ if (piocb->iocb_flag & LPFC_IO_NVME)
+ lpfc_nvme_cancel_iocb(phba, piocb);
+ else
+ lpfc_sli_release_iocbq(phba, piocb);
+ } else {
piocb->iocb.ulpStatus = ulpstatus;
piocb->iocb.un.ulpWord[4] = ulpWord4;
(piocb->iocb_cmpl) (phba, piocb, piocb);
@@ -2426,6 +2429,20 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
return;
}
+static void
+__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ unsigned long iflags;
+
+ if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
+ lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
+ spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
+ }
+ ndlp->nlp_flag &= ~NLP_UNREG_INP;
+}
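
__lpfc_sli_rpi_release() centralizes the deferred RPI free for both mailbox completion paths changed below: only a node that an earlier UNREG_LOGIN marked with NLP_RELEASE_RPI actually gives its RPI back, the RPI field is poisoned under the node lock, and the unreg-in-progress bit is cleared in every case. A userspace sketch of the flag protocol; the flag values and -1 sentinel are invented:

    #include <stdio.h>

    #define NLP_RELEASE_RPI 0x1   /* invented values for the example */
    #define NLP_UNREG_INP   0x2

    struct node { unsigned flags; int rpi; };

    static void rpi_release(struct node *n)
    {
        if (n->flags & NLP_RELEASE_RPI) {   /* marked by a prior unreg */
            printf("free rpi %d\n", n->rpi);
            n->flags &= ~NLP_RELEASE_RPI;
            n->rpi = -1;                    /* stand-in for LPFC_RPI_ALLOC_ERROR */
        }
        n->flags &= ~NLP_UNREG_INP;         /* unreg no longer in flight */
    }

    int main(void)
    {
        struct node n = { NLP_RELEASE_RPI | NLP_UNREG_INP, 42 };

        rpi_release(&n);
        printf("flags %#x, rpi %d\n", n.flags, n.rpi);
        return 0;
    }
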
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
@@ -2497,7 +2514,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport,
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
"1438 UNREG cmpl deferred mbox x%x "
- "on NPort x%x Data: x%x x%x %p\n",
+ "on NPort x%x Data: x%x x%x %px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
@@ -2507,7 +2524,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
- ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ __lpfc_sli_rpi_release(vport, ndlp);
}
pmb->ctx_ndlp = NULL;
}
@@ -2555,7 +2572,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport, KERN_INFO, LOG_MBOX | LOG_SLI,
"0010 UNREG_LOGIN vpi:%x "
"rpi:%x DID:%x defer x%x flg x%x "
- "map:%x %p\n",
+ "map:%x %px\n",
vport->vpi, ndlp->nlp_rpi,
ndlp->nlp_DID, ndlp->nlp_defer_did,
ndlp->nlp_flag,
@@ -2573,7 +2590,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport, KERN_INFO, LOG_DISCOVERY,
"4111 UNREG cmpl deferred "
"clr x%x on "
- "NPort x%x Data: x%x %p\n",
+ "NPort x%x Data: x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did, ndlp);
ndlp->nlp_flag &= ~NLP_UNREG_INP;
@@ -2582,7 +2599,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_issue_els_plogi(
vport, ndlp->nlp_DID, 0);
} else {
- ndlp->nlp_flag &= ~NLP_UNREG_INP;
+ __lpfc_sli_rpi_release(vport, ndlp);
}
}
}
@@ -2695,7 +2712,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
+ "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
"x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
@@ -3961,7 +3978,7 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
lpfc_sli_abort_iocb_ring(phba, pring);
}
} else {
@@ -3971,17 +3988,17 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
+ * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
* @phba: Pointer to HBA context object.
*
- * This function flushes all iocbs in the fcp ring and frees all the iocb
+ * This function flushes all iocbs in the IO ring and frees all the iocb
* objects in txq and txcmplq. This function will not issue abort iocbs
* for all the iocb commands in txcmplq, they will just be returned with
* IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
* slot has been permanently disabled.
**/
void
-lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
+lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
{
LIST_HEAD(txq);
LIST_HEAD(txcmplq);
@@ -3992,13 +4009,13 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
/* Indicate the I/O queues are flushed */
- phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
+ phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
spin_lock_irq(&pring->ring_lock);
/* Retrieve everything on txq */
@@ -4046,56 +4063,6 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
- * @phba: Pointer to HBA context object.
- *
- * This function flushes all wqes in the nvme rings and frees all resources
- * in the txcmplq. This function does not issue abort wqes for the IO
- * commands in txcmplq, they will just be returned with
- * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
- * slot has been permanently disabled.
- **/
-void
-lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
-{
- LIST_HEAD(txcmplq);
- struct lpfc_sli_ring *pring;
- uint32_t i;
- struct lpfc_iocbq *piocb, *next_iocb;
-
- if ((phba->sli_rev < LPFC_SLI_REV4) ||
- !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
- return;
-
- /* Hint to other driver operations that a flush is in progress. */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
- spin_unlock_irq(&phba->hbalock);
-
- /* Cycle through all NVME rings and complete each IO with
- * a local driver reason code. This is a flush so no
- * abort exchange to FW.
- */
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
-
- spin_lock_irq(&pring->ring_lock);
- list_for_each_entry_safe(piocb, next_iocb,
- &pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
- /* Retrieve everything on the txcmplq */
- list_splice_init(&pring->txcmplq, &txcmplq);
- pring->txcmplq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
-
- /* Flush the txcmpq &&&PAE */
- lpfc_sli_cancel_iocbs(phba, &txcmplq,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_DOWN);
- }
-}
-
-/**
* lpfc_sli_brdready_s3 - Check for sli3 host ready status
* @phba: Pointer to HBA context object.
* @mask: Bit mask to be checked.
@@ -4495,7 +4462,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
* checking while it resets the device. The caller is not required to hold
* any locks.
*
- * This function returns 0 always.
+ * This function returns 0 on success, else a negative error code.
**/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
@@ -4652,8 +4619,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
rc = lpfc_sli4_brdreset(phba);
- if (rc)
- return rc;
+ if (rc) {
+ phba->link_state = LPFC_HBA_ERROR;
+ goto hba_down_queue;
+ }
spin_lock_irq(&phba->hbalock);
phba->pport->stopped = 0;
@@ -4668,6 +4637,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
if (hba_aer_enabled)
pci_disable_pcie_error_reporting(phba->pcidev);
+hba_down_queue:
lpfc_hba_down_post(phba);
lpfc_sli4_queue_destroy(phba);
@@ -5584,10 +5554,8 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
qp = &sli4_hba->hdwq[qidx];
/* ARM the corresponding CQ */
- sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
- LPFC_QUEUE_REARM);
- sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
- LPFC_QUEUE_REARM);
+ sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
+ LPFC_QUEUE_REARM);
}
/* Loop thru all IRQ vectors */
@@ -7243,7 +7211,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
else
phba->hba_flag &= ~HBA_FIP_SUPPORT;
- phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
+ phba->hba_flag &= ~HBA_IOQ_FLUSH;
if (phba->sli_rev != LPFC_SLI_REV4) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7972,7 +7940,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
+ "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
mb->mbxCommand,
phba->pport->port_state,
phba->sli.sli_flag,
@@ -9333,11 +9301,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
memset(wqe, 0, sizeof(union lpfc_wqe128));
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
- /* The ct field has moved so reset */
- wqe->generic.wqe_com.word7 = 0;
- wqe->generic.wqe_com.word10 = 0;
- }
+ /* The ct field has moved so reset */
+ wqe->generic.wqe_com.word7 = 0;
+ wqe->generic.wqe_com.word10 = 0;
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
@@ -9796,7 +9762,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* we re-construct this WQE here based on information in
* iocbq from scratch.
*/
- memset(wqe, 0, sizeof(union lpfc_wqe));
+ memset(wqe, 0, sizeof(*wqe));
/* OX_ID is invariable to who sent ABTS to CT exchange */
bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
@@ -9843,6 +9809,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
break;
case CMD_SEND_FRAME:
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
+ bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
+ bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
+ bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
return 0;
@@ -9904,7 +9879,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
/* Get the WQ */
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
+ wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
} else {
wq = phba->sli4_hba.els_wq;
}
@@ -10051,7 +10026,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
- return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
+ return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
return NULL;
@@ -10504,7 +10479,7 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
+ pring = phba->sli4_hba.hdwq[i].io_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_FCP_RING;
pring->txcmplq_cnt = 0;
@@ -10523,16 +10498,6 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
spin_lock_init(&pring->ring_lock);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
- pring->flag = 0;
- pring->ringno = LPFC_FCP_RING;
- pring->txcmplq_cnt = 0;
- INIT_LIST_HEAD(&pring->txq);
- INIT_LIST_HEAD(&pring->txcmplq);
- INIT_LIST_HEAD(&pring->iocb_continueq);
- spin_lock_init(&pring->ring_lock);
- }
pring = phba->sli4_hba.nvmels_wq->pring;
pring->flag = 0;
pring->ringno = LPFC_ELS_RING;
@@ -10796,9 +10761,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
pring = qp->pring;
if (!pring)
continue;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
list_splice_init(&pring->txq, &completions);
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
if (pring == phba->sli4_hba.els_wq->pring) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
@@ -10979,7 +10944,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0402 Cannot find virtual addr for buffer tag on "
- "ring %d Data x%lx x%p x%p x%x\n",
+ "ring %d Data x%lx x%px x%px x%x\n",
pring->ringno, (unsigned long) tag,
slp->next, slp->prev, pring->postbufq_cnt);
@@ -11023,7 +10988,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0410 Cannot find virtual addr for mapped buf on "
- "ring %d Data x%llx x%p x%p x%x\n",
+ "ring %d Data x%llx x%px x%px x%x\n",
pring->ringno, (unsigned long long)phys,
slp->next, slp->prev, pring->postbufq_cnt);
return NULL;
@@ -11078,13 +11043,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
abort_iocb = phba->sli.iocbq_lookup[abort_context];
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
- "0327 Cannot abort els iocb %p "
+ "0327 Cannot abort els iocb x%px "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
abort_iocb, abort_iotag, abort_context,
irsp->ulpStatus, irsp->un.ulpWord[4]);
spin_unlock_irq(&phba->hbalock);
+ if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
+ lpfc_sli_release_iocbq(phba, abort_iocb);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -11493,7 +11461,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
int i;
/* all I/Os are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
+ if (phba->hba_flag & HBA_IOQ_FLUSH)
return errcnt;
for (i = 1; i <= phba->sli.last_iotag; i++) {
@@ -11603,7 +11571,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
spin_lock_irqsave(&phba->hbalock, iflags);
/* all I/Os are in process of being flushed */
- if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
return 0;
}
@@ -11627,7 +11595,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 =
- phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
+ phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
if (!pring_s4) {
spin_unlock(&lpfc_cmd->buf_lock);
continue;
@@ -13336,8 +13304,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
unsigned long iflags;
switch (cq->subtype) {
- case LPFC_FCP:
- lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
+ case LPFC_IO:
+ lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ }
workposted = false;
break;
case LPFC_NVME_LS: /* NVME LS uses ELS resources */
@@ -13355,15 +13328,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
- case LPFC_NVME:
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support)
- lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
- else
- lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
-
- workposted = false;
- break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0603 Invalid CQ subtype %d: "
@@ -13691,7 +13655,7 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
&delay);
break;
case LPFC_WCQ:
- if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
+ if (cq->subtype == LPFC_IO)
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_fp_handle_cqe,
&delay);
@@ -14008,10 +13972,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->CQ_wq++;
/* Process the WQ complete event */
phba->last_completion_time = jiffies;
- if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
- lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
- (struct lpfc_wcqe_complete *)&wcqe);
- if (cq->subtype == LPFC_NVME_LS)
+ if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
(struct lpfc_wcqe_complete *)&wcqe);
break;
@@ -16918,6 +16879,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
struct fc_vft_header *fc_vft_hdr;
uint32_t *header = (uint32_t *) fc_hdr;
+#define FC_RCTL_MDS_DIAGS 0xF4
+
switch (fc_hdr->fh_r_ctl) {
case FC_RCTL_DD_UNCAT: /* uncategorized information */
case FC_RCTL_DD_SOL_DATA: /* solicited data */
@@ -17445,7 +17408,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
ctiocb->context1 = lpfc_nlp_get(ndlp);
- ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
@@ -17928,6 +17890,17 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fcfi = bf_get(lpfc_rcqe_fcf_id,
&dmabuf->cq_event.cqe.rcqe_cmpl);
+ if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS && fc_hdr->fh_type == 0xFF) {
+ vport = phba->pport;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2023 MDS Loopback %d bytes\n",
+ bf_get(lpfc_rcqe_length,
+ &dmabuf->cq_event.cqe.rcqe_cmpl));
+ /* Handle MDS Loopback frames */
+ lpfc_sli4_handle_mds_loopback(vport, dmabuf);
+ return;
+ }
+
/* d_id this frame is directed to */
did = sli4_did_from_fc_hdr(fc_hdr);
@@ -18211,6 +18184,10 @@ __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2016 rpi %x not inuse\n",
+ rpi);
}
}
@@ -19461,7 +19438,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
- wq = phba->sli4_hba.hdwq[0].fcp_wq;
+ wq = phba->sli4_hba.hdwq[0].io_wq;
if (unlikely(!wq))
return 0;
pring = wq->pring;
@@ -19712,10 +19689,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
/* NVME_FCREQ and NVME_ABTS requests */
if (pwqe->iocb_flag & LPFC_IO_NVME) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
- wq = qp->nvme_wq;
+ wq = qp->io_wq;
pring = wq->pring;
- bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -19732,7 +19709,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
/* NVMET requests */
if (pwqe->iocb_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
- wq = qp->nvme_wq;
+ wq = qp->io_wq;
pring = wq->pring;
ctxp = pwqe->context2;
@@ -19743,7 +19720,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
pwqe->sli4_xritag);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -19790,9 +19767,7 @@ void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
pvt_pool = &qp->p_multixri_pool->pvt_pool;
pbl_pool = &qp->p_multixri_pool->pbl_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
- if (qp->nvme_wq)
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
multixri_pool->stat_pbl_count = pbl_pool->count;
multixri_pool->stat_pvt_count = pvt_pool->count;
@@ -19862,12 +19837,9 @@ void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
watermark_max = xri_limit;
watermark_min = xri_limit / 2;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
abts_io_bufs = qp->abts_scsi_io_bufs;
- if (qp->nvme_wq) {
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
- abts_io_bufs += qp->abts_nvme_io_bufs;
- }
+ abts_io_bufs += qp->abts_nvme_io_bufs;
new_watermark = txcmplq_cnt + abts_io_bufs;
new_watermark = min(watermark_max, new_watermark);
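A quick worked example of the watermark update above; the final clamp against watermark_min is implied by the surrounding driver code, which this hunk truncates, so treat that line as an assumption:

	/* Suppose xri_limit = 120, so watermark_max = 120, watermark_min = 60 */
	new_watermark = txcmplq_cnt + abts_io_bufs;		/* e.g. 30 outstanding */
	new_watermark = min(watermark_max, new_watermark);	/* still 30 */
	new_watermark = max(watermark_min, new_watermark);	/* assumed lower clamp -> 60 */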
@@ -20142,12 +20114,9 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
pbl_pool = &qp->p_multixri_pool->pbl_pool;
pvt_pool = &qp->p_multixri_pool->pvt_pool;
- txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
+ txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
abts_io_bufs = qp->abts_scsi_io_bufs;
- if (qp->nvme_wq) {
- txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
- abts_io_bufs += qp->abts_nvme_io_bufs;
- }
+ abts_io_bufs += qp->abts_nvme_io_bufs;
xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
xri_limit = qp->p_multixri_pool->xri_limit;
@@ -20188,6 +20157,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
iflag);
}
+
+ if (phba->cfg_xpsgl && !phba->nvmet_support &&
+ !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
+ lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
+
+ if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
+ lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
}
/**
@@ -20402,3 +20378,288 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
return lpfc_cmd;
}
+
+/**
+ * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to append the SGL chunk
+ *
+ * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
+ * and will allocate an SGL chunk if the pool is empty.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to sli4_hybrid_sgl - Success
+ **/
+struct sli4_hybrid_sgl *
+lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+ struct sli4_hybrid_sgl *allocated_sgl = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->sgl_list;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ if (likely(!list_empty(buf_list))) {
+ /* break off 1 chunk from the sgl_list */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list, list_node) {
+ list_move_tail(&list_entry->list_node,
+ &lpfc_buf->dma_sgl_xtra_list);
+ break;
+ }
+ } else {
+ /* allocate more */
+ spin_unlock_irq(&hdwq->hdwq_lock);
+ tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+ cpu_to_node(smp_processor_id()));
+ if (!tmp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8353 error kmalloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ return NULL;
+ }
+
+ tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_ATOMIC, &tmp->dma_phys_sgl);
+ if (!tmp->dma_sgl) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8354 error pool_alloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ kfree(tmp);
+ return NULL;
+ }
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+ list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
+ }
+
+ allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
+ struct sli4_hybrid_sgl,
+ list_node);
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+
+ return allocated_sgl;
+}
+
+/**
+ * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the SGL chunk
+ *
+ * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
+ *
+ * Return codes:
+ * 0 - Success
+ * -EINVAL - Error
+ **/
+int
+lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
+{
+ int rc = 0;
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->sgl_list;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
+ list_for_each_entry_safe(list_entry, tmp,
+ &lpfc_buf->dma_sgl_xtra_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ buf_list);
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+ return rc;
+}
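Taken together with the lpfc_release_io_buf() hunk earlier, the intended get/put pairing looks roughly like the sketch below (illustrative, not a verbatim caller from the patch):

	struct sli4_hybrid_sgl *sgl;

	/* Grow an IO's scatter list by one pooled chunk */
	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
	if (!sgl)
		return -ENOMEM;	/* pool empty and GFP_ATOMIC allocation failed */
	/* ... chain sgl->dma_sgl / sgl->dma_phys_sgl into the WQE's SGL ... */

	/* On IO completion, lpfc_release_io_buf() hands the chunk back */
	if (!list_empty(&lpfc_cmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_cmd);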
+
+/**
+ * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup sgl buff resources on
+ *
+ * This routine frees all SGL chunks of hdwq SGL chunk pool.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq)
+{
+ struct list_head *buf_list = &hdwq->sgl_list;
+ struct sli4_hybrid_sgl *list_entry = NULL;
+ struct sli4_hybrid_sgl *tmp = NULL;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ /* Free sgl pool */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list, list_node) {
+ dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+ list_entry->dma_sgl,
+ list_entry->dma_phys_sgl);
+ list_del(&list_entry->list_node);
+ kfree(list_entry);
+ }
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+}
+
+/**
+ * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
+ *
+ * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
+ * and will allocate a CMD/RSP buffer if the pool is empty.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to fcp_cmd_rsp_buf - Success
+ **/
+struct fcp_cmd_rsp_buf *
+lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_buf)
+{
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+ struct fcp_cmd_rsp_buf *allocated_buf = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ if (likely(!list_empty(buf_list))) {
+ /* break off 1 chunk from the list */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ &lpfc_buf->dma_cmd_rsp_list);
+ break;
+ }
+ } else {
+ /* allocate more */
+ spin_unlock_irq(&hdwq->hdwq_lock);
+ tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
+ cpu_to_node(smp_processor_id()));
+ if (!tmp) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8355 error kmalloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ return NULL;
+ }
+
+ tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+ GFP_ATOMIC,
+ &tmp->fcp_cmd_rsp_dma_handle);
+
+ if (!tmp->fcp_cmnd) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "8356 error pool_alloc memory for HDWQ "
+ "%d %s\n",
+ lpfc_buf->hdwq_no, __func__);
+ kfree(tmp);
+ return NULL;
+ }
+
+ tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
+ sizeof(struct fcp_cmnd));
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+ list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
+ }
+
+ allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
+ struct fcp_cmd_rsp_buf,
+ list_node);
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+
+ return allocated_buf;
+}
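The CMD/RSP pair lives in a single dma_pool allocation; the layout below is implied by the pointer arithmetic above, and the response's bus address is an assumption derived from the same arithmetic:

	/* One allocation from lpfc_cmd_rsp_buf_pool backs both frames:
	 *
	 *   tmp->fcp_cmnd                 virtual start (struct fcp_cmnd)
	 *   tmp->fcp_rsp                  fcp_cmnd + sizeof(struct fcp_cmnd)
	 *   tmp->fcp_cmd_rsp_dma_handle   bus address of fcp_cmnd
	 */
	dma_addr_t rsp_dma = tmp->fcp_cmd_rsp_dma_handle +
			     sizeof(struct fcp_cmnd);	/* assumed rsp bus address */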
+
+/**
+ * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_buf: IO buf structure with the CMD/RSP buf
+ *
+ * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
+ *
+ * Return codes:
+ * 0 - Success
+ * -EINVAL - Error
+ **/
+int
+lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *lpfc_buf)
+{
+ int rc = 0;
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+ struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
+ list_for_each_entry_safe(list_entry, tmp,
+ &lpfc_buf->dma_cmd_rsp_list,
+ list_node) {
+ list_move_tail(&list_entry->list_node,
+ buf_list);
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+ return rc;
+}
+
+/**
+ * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
+ * @phba: phba object
+ * @hdwq: hdwq to cleanup cmd rsp buff resources on
+ *
+ * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq)
+{
+ struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+ struct fcp_cmd_rsp_buf *list_entry = NULL;
+ struct fcp_cmd_rsp_buf *tmp = NULL;
+
+ spin_lock_irq(&hdwq->hdwq_lock);
+
+ /* Free cmd_rsp buf pool */
+ list_for_each_entry_safe(list_entry, tmp,
+ buf_list,
+ list_node) {
+ dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
+ list_entry->fcp_cmnd,
+ list_entry->fcp_cmd_rsp_dma_handle);
+ list_del(&list_entry->list_node);
+ kfree(list_entry);
+ }
+
+ spin_unlock_irq(&hdwq->hdwq_lock);
+}
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 467b8270f7fd..37fbcb46387e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -365,9 +365,18 @@ struct lpfc_io_buf {
/* Common fields */
struct list_head list;
void *data;
+
dma_addr_t dma_handle;
dma_addr_t dma_phys_sgl;
- struct sli4_sge *dma_sgl;
+
+ struct sli4_sge *dma_sgl; /* initial segment chunk */
+
+ /* linked list of extra sli4_hybrid_sge */
+ struct list_head dma_sgl_xtra_list;
+
+ /* list head for fcp_cmd_rsp buf */
+ struct list_head dma_cmd_rsp_list;
+
struct lpfc_iocbq cur_iocbq;
struct lpfc_sli4_hdw_queue *hdwq;
uint16_t hdwq_no;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a81ef0293696..0d4882a9e634 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -49,9 +49,6 @@
#define LPFC_FCP_MQ_THRESHOLD_MAX 256
#define LPFC_FCP_MQ_THRESHOLD_DEF 8
-/* Common buffer size to accomidate SCSI and NVME IO buffers */
-#define LPFC_COMMON_IO_BUF_SZ 768
-
/*
* Provide the default FCF Record attributes used by the driver
* when nonFIP mode is configured and there is no other default
@@ -114,9 +111,8 @@ enum lpfc_sli4_queue_type {
enum lpfc_sli4_queue_subtype {
LPFC_NONE,
LPFC_MBOX,
- LPFC_FCP,
+ LPFC_IO,
LPFC_ELS,
- LPFC_NVME,
LPFC_NVMET,
LPFC_NVME_LS,
LPFC_USOL
@@ -646,22 +642,17 @@ struct lpfc_eq_intr_info {
struct lpfc_sli4_hdw_queue {
/* Pointers to the constructed SLI4 queues */
struct lpfc_queue *hba_eq; /* Event queues for HBA */
- struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */
- struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
- struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */
- struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
- uint16_t fcp_cq_map;
- uint16_t nvme_cq_map;
+ struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */
+ struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */
+ uint16_t io_cq_map;
/* Keep track of IO buffers for this hardware queue */
spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
struct list_head lpfc_io_buf_list_get;
spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
struct list_head lpfc_io_buf_list_put;
- spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
- struct list_head lpfc_abts_scsi_buf_list;
- spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
- struct list_head lpfc_abts_nvme_buf_list;
+ spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
+ struct list_head lpfc_abts_io_buf_list;
uint32_t total_io_bufs;
uint32_t get_io_bufs;
uint32_t put_io_bufs;
@@ -685,6 +676,13 @@ struct lpfc_sli4_hdw_queue {
uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
#endif
+
+ /* Per HDWQ pool resources */
+ struct list_head sgl_list;
+ struct list_head cmd_rsp_buf_list;
+
+ /* Lock for syncing Per HDWQ pool resources */
+ spinlock_t hdwq_lock;
};
#ifdef LPFC_HDWQ_LOCK_STAT
@@ -850,8 +848,8 @@ struct lpfc_sli4_hba {
struct lpfc_queue **cq_lookup;
struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
- spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
- struct list_head lpfc_abts_scsi_buf_list;
+ spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
+ struct list_head lpfc_abts_io_buf_list;
struct list_head lpfc_nvmet_sgl_list;
spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
struct list_head lpfc_abts_nvmet_ctx_list;
@@ -1056,10 +1054,11 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
- struct sli4_wcqe_xri_aborted *, int);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
- struct sli4_wcqe_xri_aborted *axri, int idx);
+ struct sli4_wcqe_xri_aborted *axri,
+ struct lpfc_io_buf *lpfc_ncmd);
+void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri, int idx);
void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
@@ -1094,6 +1093,17 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
+struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
+int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_io_buf *buf);
+void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq);
+void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
+ struct lpfc_sli4_hdw_queue *hdwq);
static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
{
return q->q_pgs[idx / q->entry_cnt_per_pg] +
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f7e93aaf1e00..b8aae31ffda3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.2.0.3"
+#define LPFC_DRIVER_VERSION "12.4.0.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 343bc71d4615..b76646357980 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -527,9 +527,11 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
- spin_unlock_irq(shost->host_lock);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a14e8344822b..a6e788c02ff4 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2429,6 +2429,7 @@ struct megasas_instance {
u8 adapter_type;
bool consistent_mask_64bit;
bool support_nvme_passthru;
+ bool enable_sdev_max_qd;
u8 task_abort_tmo;
u8 max_reset_tmo;
u8 snapdump_wait_time;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f9f07935556e..42cf38c1ea99 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -109,6 +109,10 @@ int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level - range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
+unsigned int enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, int, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -1941,25 +1945,19 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
}
-
/*
- * megasas_set_static_target_properties -
- * Device property set by driver are static and it is not required to be
- * updated after OCR.
- *
- * set io timeout
- * set device queue depth
- * set nvme device properties. see - megasas_set_nvme_device_properties
+ * megasas_set_fw_assisted_qd -
+ * Set the device queue depth: the firmware-assisted queue depth, or
+ * the host's can_queue when enable_sdev_max_qd is set.
*
* @sdev: scsi device
* @is_target_prop true, if fw provided target properties.
*/
-static void megasas_set_static_target_properties(struct scsi_device *sdev,
+static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
bool is_target_prop)
{
u8 interface_type;
u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
- u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
u32 tgt_device_qd;
struct megasas_instance *instance;
struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -1968,11 +1966,6 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
mr_device_priv_data = sdev->hostdata;
interface_type = mr_device_priv_data->interface_type;
- /*
- * The RAID firmware may require extended timeouts.
- */
- blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
-
switch (interface_type) {
case SAS_PD:
device_qd = MEGASAS_SAS_QD;
@@ -1990,18 +1983,49 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev,
if (tgt_device_qd &&
(tgt_device_qd <= instance->host->can_queue))
device_qd = tgt_device_qd;
+ }
- /* max_io_size_kb will be set to non zero for
- * nvme based vd and syspd.
- */
+ if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
+ device_qd = instance->host->can_queue;
+
+ scsi_change_queue_depth(sdev, device_qd);
+}
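The resulting queue-depth precedence can be condensed as below (a sketch of the function above; default_qd_for() is a hypothetical helper standing in for the switch on interface_type):

	u32 qd = default_qd_for(interface_type);	/* hypothetical helper */

	if (is_target_prop && tgt_device_qd &&
	    tgt_device_qd <= instance->host->can_queue)
		qd = tgt_device_qd;			/* firmware-assisted qd */
	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
		qd = instance->host->can_queue;		/* user override wins */
	scsi_change_queue_depth(sdev, qd);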
+
+/*
+ * megasas_set_static_target_properties -
+ * Device property set by driver are static and it is not required to be
+ * updated after OCR.
+ *
+ * set io timeout
+ * set device queue depth
+ * set nvme device properties. see - megasas_set_nvme_device_properties
+ *
+ * @sdev: scsi device
+ * @is_target_prop true, if fw provided target properties.
+ */
+static void megasas_set_static_target_properties(struct scsi_device *sdev,
+ bool is_target_prop)
+{
+ u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
+ struct megasas_instance *instance;
+
+ instance = megasas_lookup_instance(sdev->host->host_no);
+
+ /*
+ * The RAID firmware may require extended timeouts.
+ */
+ blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
+
+ /* max_io_size_kb will be set to non zero for
+ * nvme based vd and syspd.
+ */
+ if (is_target_prop)
max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
- }
if (instance->nvme_page_size && max_io_size_kb)
megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
- scsi_change_queue_depth(sdev, device_qd);
-
+ megasas_set_fw_assisted_qd(sdev, is_target_prop);
}
@@ -3285,6 +3309,48 @@ fw_cmds_outstanding_show(struct device *cdev,
}
static ssize_t
+enable_sdev_max_qd_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
+}
+
+static ssize_t
+enable_sdev_max_qd_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+ u32 val = 0;
+ bool is_target_prop;
+ int ret_target_prop = DCMD_FAILED;
+ struct scsi_device *sdev;
+
+ if (kstrtou32(buf, 0, &val) != 0) {
+ pr_err("megasas: could not set enable_sdev_max_qd\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&instance->reset_mutex);
+ if (val)
+ instance->enable_sdev_max_qd = true;
+ else
+ instance->enable_sdev_max_qd = false;
+
+ shost_for_each_device(sdev, shost) {
+ ret_target_prop = megasas_get_target_prop(instance, sdev);
+ is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+ megasas_set_fw_assisted_qd(sdev, is_target_prop);
+ }
+ mutex_unlock(&instance->reset_mutex);
+
+ return strlen(buf);
+}
+
+static ssize_t
dump_system_regs_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -3313,6 +3379,7 @@ static DEVICE_ATTR_RW(fw_crash_state);
static DEVICE_ATTR_RO(page_size);
static DEVICE_ATTR_RO(ldio_outstanding);
static DEVICE_ATTR_RO(fw_cmds_outstanding);
+static DEVICE_ATTR_RW(enable_sdev_max_qd);
static DEVICE_ATTR_RO(dump_system_regs);
static DEVICE_ATTR_RO(raid_map_id);
@@ -3323,6 +3390,7 @@ static struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_page_size,
&dev_attr_ldio_outstanding,
&dev_attr_fw_cmds_outstanding,
+ &dev_attr_enable_sdev_max_qd,
&dev_attr_dump_system_regs,
&dev_attr_raid_map_id,
NULL,
@@ -5894,6 +5962,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
MR_MAX_RAID_MAP_SIZE_MASK);
}
+ instance->enable_sdev_max_qd = enable_sdev_max_qd;
+
switch (instance->adapter_type) {
case VENTURA_SERIES:
fusion->pcie_bw_limitation = true;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 120e3c4de8c2..e301458bcbae 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -323,9 +323,6 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
{
u16 cur_max_fw_cmds = 0;
u16 ldio_threshold = 0;
- struct megasas_register_set __iomem *reg_set;
-
- reg_set = instance->reg_set;
/* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
if (instance->adapter_type < VENTURA_SERIES)
@@ -3511,7 +3508,7 @@ megasas_complete_r1_command(struct megasas_instance *instance,
* @instance: Adapter soft state
* Completes all commands that is in reply descriptor queue
*/
-int
+static int
complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
struct megasas_irq_context *irq_context)
{
@@ -3702,7 +3699,7 @@ static void megasas_enable_irq_poll(struct megasas_instance *instance)
* megasas_sync_irqs - Synchronizes all IRQs owned by adapter
* @instance: Adapter soft state
*/
-void megasas_sync_irqs(unsigned long instance_addr)
+static void megasas_sync_irqs(unsigned long instance_addr)
{
u32 count, i;
struct megasas_instance *instance =
@@ -3760,7 +3757,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
*
* Tasklet to complete cmds
*/
-void
+static void
megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
struct megasas_instance *instance =
@@ -3780,7 +3777,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
/**
* megasas_isr_fusion - isr entry point
*/
-irqreturn_t megasas_isr_fusion(int irq, void *devp)
+static irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
struct megasas_irq_context *irq_context = devp;
struct megasas_instance *instance = irq_context->instance;
@@ -3816,7 +3813,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
* mfi_cmd: megasas_cmd pointer
*
*/
-void
+static void
build_mpt_mfi_pass_thru(struct megasas_instance *instance,
struct megasas_cmd *mfi_cmd)
{
@@ -3874,7 +3871,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
* @cmd: mfi cmd to build
*
*/
-union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
@@ -3900,7 +3897,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
* @cmd: mfi cmd pointer
*
*/
-void
+static void
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
@@ -4096,8 +4093,9 @@ static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
}
/* This function waits for outstanding commands on fusion to complete */
-int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
- int reason, int *convert)
+static int
+megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ int reason, int *convert)
{
int i, outstanding, retval = 0, hb_seconds_missed = 0;
u32 fw_state, abs_state;
@@ -4221,7 +4219,7 @@ void megasas_reset_reply_desc(struct megasas_instance *instance)
* megasas_refire_mgmt_cmd : Re-fire management commands
* @instance: Controller's soft instance
*/
-void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
+static void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
{
int j;
struct megasas_cmd_fusion *cmd_fusion;
@@ -4747,7 +4745,8 @@ out:
}
/*SRIOV get other instance in cluster if any*/
-struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
+static struct megasas_instance *
+megasas_get_peer_instance(struct megasas_instance *instance)
{
int i;
@@ -5053,7 +5052,7 @@ out:
}
/* Fusion Crash dump collection */
-void megasas_fusion_crash_dump(struct megasas_instance *instance)
+static void megasas_fusion_crash_dump(struct megasas_instance *instance)
{
u32 status_reg;
u8 partial_copy = 0;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 7efd17a3c25b..18b1e31b5eb8 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -9,7 +9,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.53
+ * mpi2.h Version: 02.00.54
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -121,6 +121,7 @@
* 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT.
* 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MPI2_IOCSTATUS_FAILURE
+ * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT
* --------------------------------------------------------------------------
*/
@@ -161,7 +162,7 @@
/* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x35)
+#define MPI2_HEADER_VERSION_UNIT (0x36)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 167d79d145ca..3a6871aecada 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -7,7 +7,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.46
+ * mpi2_cnfg.h Version: 02.00.47
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -244,6 +244,11 @@
* Added DMDReport Delay Time defines to
* PCIeIOUnitPage1
* --------------------------------------------------------------------------
+ * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7.
+ * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1
+ * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1
+ * Added DMDReport Delay Time defines to PCIeIOUnitPage1
+ * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
*/
#ifndef MPI2_CNFG_H
@@ -810,7 +815,8 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
U8 Location; /*0x14 */
U8 ReceptacleID; /*0x15 */
U16 Slot; /*0x16 */
- U32 Reserved2; /*0x18 */
+ U16 Slotx2; /*0x18 */
+ U16 Slotx4; /*0x1A */
} MPI2_MANPAGE7_CONNECTOR_INFO,
*PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
Mpi2ManPage7ConnectorInfo_t,
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
index 4959585f029d..a3f677853098 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h
@@ -5,7 +5,7 @@
* Name: mpi2_image.h
* Description: Contains definitions for firmware and other component images
* Creation Date: 04/02/2018
- * Version: 02.06.03
+ * Version: 02.06.04
*
*
* Version History
@@ -17,6 +17,8 @@
* 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26
* 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE
* 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES
+ * 12-17-18 02.06.04 Added MPI2_EXT_IMAGE_TYPE_PBLP
+ * Shorten some defines to be compatible with DOS
*/
#ifndef MPI2_IMAGE_H
#define MPI2_IMAGE_H
@@ -200,17 +202,17 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER {
#define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042)
/**** Definitions for Signature1 field ****/
-#define MPI26_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_CBB (0x20424243)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_MFG (0x2047464D)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_BIOS (0x534F4942)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIM (0x4D494948)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIA (0x41494948)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_CPLD (0x444C5043)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_NVDATA (0x5444564E)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
-#define MPI26_IMAGE_HEADER_SIGNATURE1_PBLP (0x50424C50)
+#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041)
+#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243)
+#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D)
+#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942)
+#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948)
+#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948)
+#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043)
+#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053)
+#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E)
+#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147)
+#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250)
/**** Definitions for Signature2 field ****/
#define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
@@ -278,6 +280,7 @@ typedef struct _MPI2_EXT_IMAGE_HEADER {
#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
#define MPI2_EXT_IMAGE_TYPE_RDE (0x0A)
+#define MPI2_EXT_IMAGE_TYPE_PBLP (0x0B)
#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -472,12 +475,12 @@ Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
-#define MPI26_HASH_ALGORITHM_VERSION_MASK (0xE0)
-#define MPI26_HASH_ALGORITHM_VERSION_NONE (0x00)
-#define MPI26_HASH_ALGORITHM_VERSION_SHA1 (0x20)
-#define MPI26_HASH_ALGORITHM_VERSION_SHA2 (0x40)
-#define MPI26_HASH_ALGORITHM_VERSION_SHA3 (0x60)
-#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F)
+#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0)
+#define MPI26_HASH_ALGORITHM_VER_NONE (0x00)
+#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20)
+#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40)
+#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60)
+#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F)
#define MPI26_HASH_ALGORITHM_SIZE_256 (0x01)
#define MPI26_HASH_ALGORITHM_SIZE_512 (0x02)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
index 63a09509d7d1..bb7b79cfa558 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h
@@ -6,7 +6,7 @@
* Title: MPI PCIe Attached Devices structures and definitions.
* Creation Date: October 9, 2012
*
- * mpi2_pci.h Version: 02.00.03
+ * mpi2_pci.h Version: 02.00.04
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -24,6 +24,8 @@
* 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to
* NVME Encapsulated Request.
* 07-22-18 02.00.03 Updted flags field for NVME Encapsulated req
+ * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI
+ * Shorten some defines to be compatible with DOS
* --------------------------------------------------------------------------
*/
@@ -41,7 +43,7 @@
#define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000)
#define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001)
#define MPI26_PCIE_DEVINFO_NVME (0x00000003)
-
+#define MPI26_PCIE_DEVINFO_SCSI (0x00000004)
/****************************************************************************
* NVMe Encapsulated message
@@ -75,10 +77,9 @@ typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST {
#define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
#define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010)
/*Error Response Address Space */
-#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C)
-#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR_MASK (0x000C)
-#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000)
-#define MPI26_NVME_FLAGS_IOCCTL_RSP_ADDR (0x0008)
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK (0x000C)
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000)
+#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL (0x0008)
/* Data Direction */
#define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003)
#define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 3f966b6796b3..17ef7f63b938 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -7,7 +7,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.15
+ * mpi2_tool.h Version: 02.00.16
*
* Version History
* ---------------
@@ -40,6 +40,7 @@
* Tool Request Message.
* 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool.
* Added option for DeviceInfo field in ISTWI tool.
+ * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS.
* --------------------------------------------------------------------------
*/
@@ -230,11 +231,11 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
/*MPI26 TOOLBOX Request MsgFlags defines */
-#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_MASK (0x01)
+#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK (0x01)
/*Request uses Man Page 43 device index addressing */
-#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINDEX (0x00)
+#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00)
/*Request uses Man Page 43 device info struct addressing */
-#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINFO (0x01)
+#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01)
/*Toolbox ISTWI Read Write Tool reply message */
typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
@@ -403,7 +404,7 @@ Mpi2ToolboxTextDisplayRequest_t,
*/
/*Toolbox Backend Lane Margining Tool request message */
-typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REQUEST {
+typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST {
U8 Tool; /*0x00 */
U8 Reserved1; /*0x01 */
U8 ChainOffset; /*0x02 */
@@ -434,7 +435,7 @@ typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REQUEST {
/*Toolbox Backend Lane Margining Tool reply message */
-typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REPLY {
+typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY {
U8 Tool; /*0x00 */
U8 Reserved1; /*0x01 */
U8 MsgLength; /*0x02 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 050c0f029ef9..fea3cb6a090b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2260,6 +2260,11 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
bool build_prp = true;
data_length = scsi_bufflen(scmd);
+ if (pcie_device &&
+ (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
+ build_prp = false;
+ return build_prp;
+ }
/* If Datalength is <= 16K and the number of SGE entries is <= 2,
* we build an IEEE SGL
@@ -3178,6 +3183,37 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
}
}
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
+
+/**
+ * _base_check_for_fault_and_issue_reset - check if IOC is in fault state
+ * and if it is in fault state then issue diag reset.
+ * @ioc: per adapter object
+ *
+ * Returns: 0 for success, non-zero for failure.
+ */
+static int
+_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 ioc_state;
+ int rc = -EFAULT;
+
+ dinitprintk(ioc, pr_info("%s\n", __func__));
+ if (ioc->pci_error_recovery)
+ return 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = _base_diag_reset(ioc);
+ }
+
+ return rc;
+}
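The callers below use this helper in one retry idiom, sketched here with _base_get_ioc_facts() as the failing step. Note the return-code semantics: 0 means PCI error recovery is in progress or the diag reset succeeded, while -EFAULT means the IOC was not in a fault state, so there is nothing to recover from:

	r = _base_get_ioc_facts(ioc);
	if (r) {
		/* Try to recover a faulted IOC with a diag reset, then retry once */
		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || _base_get_ioc_facts(ioc))
			goto out_fail;
	}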
+
/**
* mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
* @ioc: per adapter object
@@ -3190,7 +3226,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
struct pci_dev *pdev = ioc->pdev;
u32 memap_sz;
u32 pio_sz;
- int i, r = 0;
+ int i, r = 0, rc;
u64 pio_chip = 0;
phys_addr_t chip_phys = 0;
struct adapter_reply_queue *reply_q;
@@ -3251,8 +3287,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
_base_mask_interrupts(ioc);
r = _base_get_ioc_facts(ioc);
- if (r)
- goto out_fail;
+ if (r) {
+ rc = _base_check_for_fault_and_issue_reset(ioc);
+ if (rc || (_base_get_ioc_facts(ioc)))
+ goto out_fail;
+ }
if (!ioc->rdpq_array_enable_assigned) {
ioc->rdpq_array_enable = ioc->rdpq_array_capable;
@@ -5037,6 +5076,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
_base_release_memory_pools(ioc);
goto retry_allocation;
}
+ memset(ioc->request, 0, sz);
if (retry_sz)
ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
@@ -5410,8 +5450,6 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
*
* Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
*/
-static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
static int
_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
@@ -5868,6 +5906,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc->base_cmds.status = MPT3_CMD_PENDING;
request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->base_cmds.smid = smid;
+ memset(request, 0, ioc->request_sz);
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
init_completion(&ioc->base_cmds.done);
ioc->put_smid_default(ioc, smid);
@@ -6686,7 +6725,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
- int r, i, index;
+ int r, i, index, rc;
unsigned long flags;
u32 reply_address;
u16 smid;
@@ -6789,8 +6828,19 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
skip_init_reply_post_free_queue:
r = _base_send_ioc_init(ioc);
- if (r)
- return r;
+ if (r) {
+ /*
+ * There is no need to check the IOC for a fault state and issue
+ * a diag reset during host reset. This check is needed only at
+ * driver load time.
+ */
+ if (!ioc->is_driver_loading)
+ return r;
+
+ rc = _base_check_for_fault_and_issue_reset(ioc);
+ if (rc || (_base_send_ioc_init(ioc)))
+ return r;
+ }
/* initialize reply free host index */
ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
@@ -6882,7 +6932,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
- int r, i;
+ int r, i, rc;
int cpu_id, last_cpu_id = 0;
dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
@@ -6926,8 +6976,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
pci_set_drvdata(ioc->pdev, ioc->shost);
r = _base_get_ioc_facts(ioc);
- if (r)
- goto out_free_resources;
+ if (r) {
+ rc = _base_check_for_fault_and_issue_reset(ioc);
+ if (rc || (_base_get_ioc_facts(ioc)))
+ goto out_free_resources;
+ }
switch (ioc->hba_mpi_version_belonged) {
case MPI2_VERSION:
@@ -6995,8 +7048,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
r = _base_get_port_facts(ioc, i);
- if (r)
- goto out_free_resources;
+ if (r) {
+ rc = _base_check_for_fault_and_issue_reset(ioc);
+ if (rc || (_base_get_port_facts(ioc, i)))
+ goto out_free_resources;
+ }
}
r = _base_allocate_memory_pools(ioc);
@@ -7118,6 +7174,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
if (r)
goto out_free_resources;
+ /*
+ * Copy current copy of IOCFacts in prev_fw_facts
+ * and it will be used during online firmware upgrade.
+ */
+ memcpy(&ioc->prev_fw_facts, &ioc->facts,
+ sizeof(struct mpt3sas_facts));
+
ioc->non_operational_loop = 0;
ioc->got_task_abort_from_ioctl = 0;
return 0;
@@ -7280,6 +7343,85 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
+ * attributes during online firmware upgrade and update the corresponding
+ * IOC variables accordingly.
+ *
+ * @ioc: Pointer to MPT_ADAPTER structure
+ */
+static int
+_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 pd_handles_sz;
+ void *pd_handles = NULL, *blocking_handles = NULL;
+ void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
+ struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
+
+ if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
+ pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ pd_handles_sz++;
+
+ pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
+ GFP_KERNEL);
+ if (!pd_handles) {
+ ioc_info(ioc,
+ "Unable to allocate the memory for pd_handles of sz: %d\n",
+ pd_handles_sz);
+ return -ENOMEM;
+ }
+ memset(pd_handles + ioc->pd_handles_sz, 0,
+ (pd_handles_sz - ioc->pd_handles_sz));
+ ioc->pd_handles = pd_handles;
+
+ blocking_handles = krealloc(ioc->blocking_handles,
+ pd_handles_sz, GFP_KERNEL);
+ if (!blocking_handles) {
+ ioc_info(ioc,
+ "Unable to allocate the memory for "
+ "blocking_handles of sz: %d\n",
+ pd_handles_sz);
+ return -ENOMEM;
+ }
+ memset(blocking_handles + ioc->pd_handles_sz, 0,
+ (pd_handles_sz - ioc->pd_handles_sz));
+ ioc->blocking_handles = blocking_handles;
+ ioc->pd_handles_sz = pd_handles_sz;
+
+ pend_os_device_add = krealloc(ioc->pend_os_device_add,
+ pd_handles_sz, GFP_KERNEL);
+ if (!pend_os_device_add) {
+ ioc_info(ioc,
+ "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
+ pd_handles_sz);
+ return -ENOMEM;
+ }
+ memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
+ (pd_handles_sz - ioc->pend_os_device_add_sz));
+ ioc->pend_os_device_add = pend_os_device_add;
+ ioc->pend_os_device_add_sz = pd_handles_sz;
+
+ device_remove_in_progress = krealloc(
+ ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
+ if (!device_remove_in_progress) {
+ ioc_info(ioc,
+ "Unable to allocate the memory for "
+ "device_remove_in_progress of sz: %d\n "
+ , pd_handles_sz);
+ return -ENOMEM;
+ }
+ memset(device_remove_in_progress +
+ ioc->device_remove_in_progress_sz, 0,
+ (pd_handles_sz - ioc->device_remove_in_progress_sz));
+ ioc->device_remove_in_progress = device_remove_in_progress;
+ ioc->device_remove_in_progress_sz = pd_handles_sz;
+ }
+
+ memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
+ return 0;
+}
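All four reallocations above follow one grow-and-zero pattern keyed to the new MaxDevHandle; a condensed sketch (grow_handle_bitmap() is hypothetical, not a helper added by the patch):

	/* Hypothetical helper condensing the pattern: one bit per device
	 * handle, sized as DIV_ROUND_UP(MaxDevHandle, 8) bytes, grown with
	 * krealloc() and with only the newly added tail zeroed.
	 */
	static void *grow_handle_bitmap(void *old, u16 old_sz, u16 new_sz)
	{
		void *p = krealloc(old, new_sz, GFP_KERNEL);

		if (!p)
			return NULL;
		memset(p + old_sz, 0, new_sz - old_sz);
		return p;
	}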
+
+/**
* mpt3sas_base_hard_reset_handler - reset controller
* @ioc: Pointer to MPT_ADAPTER structure
* @type: FORCE_BIG_HAMMER or SOFT_RESET
@@ -7342,6 +7484,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
if (r)
goto out;
+ r = _base_check_ioc_facts_changes(ioc);
+ if (r) {
+ ioc_info(ioc,
+ "Some of the parameters got changed in this new firmware"
+ " image and it requires system reboot\n");
+ goto out;
+ }
if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
panic("%s: Issue occurred with flashing controller firmware."
"Please reboot the system and ensure that the correct"
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 6afbdb044310..faca0a5e71f8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "29.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 29
+#define MPT3SAS_DRIVER_VERSION "31.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 31
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -583,6 +583,7 @@ static inline void sas_device_put(struct _sas_device *s)
* @enclosure_level: The level of device's enclosure from the controller
* @connector_name: ASCII value of the Connector's name
* @serial_number: pointer of serial number string allocated runtime
+ * @access_status: Device's Access Status
* @refcount: reference count for deletion
*/
struct _pcie_device {
@@ -604,6 +605,7 @@ struct _pcie_device {
u8 connector_name[4];
u8 *serial_number;
u8 reset_timeout;
+ u8 access_status;
struct kref refcount;
};
/**
@@ -1045,6 +1047,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @thresh_hold: Max number of reply descriptors processed
* before updating Host Index
+ * @drv_support_bitmap: driver's supported feature bit map
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -1066,6 +1069,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @event_log: event log pointer
* @event_masks: events that are masked
* @facts: static facts data
+ * @prev_fw_facts: previous fw facts data
* @pfacts: static port facts data
* @manu_pg0: static manufacturing page 0
* @manu_pg10: static manufacturing page 10
@@ -1227,6 +1231,8 @@ struct MPT3SAS_ADAPTER {
bool msix_load_balance;
u16 thresh_hold;
u8 high_iops_queues;
+ u32 drv_support_bitmap;
+ bool enable_sdev_max_qd;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
@@ -1276,6 +1282,7 @@ struct MPT3SAS_ADAPTER {
/* static config pages */
struct mpt3sas_facts facts;
+ struct mpt3sas_facts prev_fw_facts;
struct mpt3sas_port_facts *pfacts;
Mpi2ManufacturingPage0_t manu_pg0;
struct Mpi2ManufacturingPage10_t manu_pg10;
@@ -1450,6 +1457,8 @@ struct MPT3SAS_ADAPTER {
GET_MSIX_INDEX get_msix_index_for_smlio;
};
+#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001
+
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
@@ -1579,6 +1588,7 @@ struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc,
void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
/* config shared API */
u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1733,4 +1743,20 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
+/**
+ * mpt3sas_scsih_is_pcie_scsi_device - determines if the device is a PCIe SCSI device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Return: 1 if the PCIe device type is SCSI, 0 otherwise.
+ */
+static inline int
+mpt3sas_scsih_is_pcie_scsi_device(u32 device_info)
+{
+ if ((device_info &
+ MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) == MPI26_PCIE_DEVINFO_SCSI)
+ return 1;
+ else
+ return 0;
+}
#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d4ecfbbe738c..7d696952b376 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -596,8 +596,16 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
if (priv_data->sas_target->handle != handle)
continue;
st = scsi_cmd_priv(scmd);
- tm_request->TaskMID = cpu_to_le16(st->smid);
- found = 1;
+
+ /*
+ * If the TaskMID passed in from user space is zero, the first
+ * outstanding smid is picked up. Otherwise, only the targeted
+ * smid is matched.
+ */
+ if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
+ tm_request->TaskMID = cpu_to_le16(st->smid);
+ found = 1;
+ }
}
if (!found) {
@@ -654,7 +662,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
- u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
issue_reset = 0;
@@ -707,6 +714,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(request, 0, ioc->request_sz);
memcpy(request, mpi_request, karg.data_sge_offset*4);
ioc->ctl_cmds.smid = smid;
data_out_sz = karg.data_out_size;
@@ -921,13 +929,37 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi2ToolboxCleanRequest_t *toolbox_request =
(Mpi2ToolboxCleanRequest_t *)mpi_request;
- if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
+ if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL)
+ || (toolbox_request->Tool ==
+ MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN))
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- } else {
+ else if (toolbox_request->Tool ==
+ MPI2_TOOLBOX_MEMORY_MOVE_TOOL) {
+ Mpi2ToolboxMemMoveRequest_t *mem_move_request =
+ (Mpi2ToolboxMemMoveRequest_t *)request;
+ Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL;
+
+ ioc->build_sg_mpi(ioc, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz);
+ if (data_out_sz && !data_in_sz) {
+ dst =
+ (Mpi2SGESimple64_t *)&mem_move_request->SGL;
+ src = (void *)dst + ioc->sge_size;
+
+ memcpy(&tmp, src, ioc->sge_size);
+ memcpy(src, dst, ioc->sge_size);
+ memcpy(dst, &tmp, ioc->sge_size);
+ }
+ if (ioc->logging_level & MPT_DEBUG_TM) {
+ ioc_info(ioc,
+ "Mpi2ToolboxMemMoveRequest_t request msg\n");
+ _debug_dump_mf(mem_move_request,
+ ioc->request_sz/4);
+ }
+ } else
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
- data_in_dma, data_in_sz);
- }
+ data_in_dma, data_in_sz);
ioc->put_smid_default(ioc, smid);
break;
}
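
The MemMove branch above swaps the source and destination SGEs in place via
a scratch copy when only an outbound buffer is supplied. A standalone sketch
of that three-memcpy exchange (parameter names are illustrative; the driver
uses ioc->sge_size):

    #include <string.h>

    /* Exchange two fixed-size, possibly unaligned descriptors through a
     * scratch buffer. Assumption: sge_size never exceeds the scratch size.
     */
    static void swap_sge(void *a, void *b, size_t sge_size)
    {
            unsigned char tmp[64];  /* assumption: sge_size <= 64 */

            memcpy(tmp, a, sge_size);
            memcpy(a, b, sge_size);
            memcpy(b, tmp, sge_size);
    }
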
@@ -1047,12 +1079,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
mpt3sas_halt_firmware(ioc);
pcie_device = mpt3sas_get_pdev_by_handle(ioc,
le16_to_cpu(mpi_request->FunctionDependent1));
- if (pcie_device && (!ioc->tm_custom_handling))
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(
+ pcie_device->device_info))))
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
- tr_method);
+ MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
@@ -3278,9 +3312,8 @@ diag_trigger_scsi_store(struct device *cdev,
ssize_t sz;
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
- sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
- memset(&ioc->diag_trigger_scsi, 0,
- sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+ sz = min(sizeof(ioc->diag_trigger_scsi), count);
+ memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
memcpy(&ioc->diag_trigger_scsi, buf, sz);
if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
@@ -3349,6 +3382,125 @@ static DEVICE_ATTR_RW(diag_trigger_mpi);
/*****************************************/
+/**
+ * drv_support_bitmap_show - driver-supported feature bitmap
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs read-only shost attribute.
+ */
+static ssize_t
+drv_support_bitmap_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
+}
+static DEVICE_ATTR_RO(drv_support_bitmap);
+
+/**
+ * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs read/write shost attribute. When enabled, each target's queue
+ * depth is set to the HBA IO queue depth (can_queue).
+ */
+static ssize_t
+enable_sdev_max_qd_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
+}
+
+/**
+ * enable_sdev_max_qd_store - enable/disable sdev max qd
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the user-supplied value ("0" or "1")
+ * @count: length of the buffer
+ *
+ * A sysfs read/write shost attribute. When enabled, each target's queue
+ * depth is set to the HBA IO queue depth; when disabled, targets revert
+ * to their per-type default queue depth.
+ */
+static ssize_t
+enable_sdev_max_qd_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ int val = 0;
+ struct scsi_device *sdev;
+ struct _raid_device *raid_device;
+ int qdepth;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ switch (val) {
+ case 0:
+ ioc->enable_sdev_max_qd = 0;
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ if (!sas_target_priv_data)
+ continue;
+
+ if (sas_target_priv_data->flags &
+ MPT_TARGET_FLAGS_VOLUME) {
+ raid_device =
+ mpt3sas_raid_device_find_by_handle(ioc,
+ sas_target_priv_data->handle);
+
+ switch (raid_device->volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ qdepth =
+ MPT3SAS_SAS_QUEUE_DEPTH;
+ else
+ qdepth =
+ MPT3SAS_SATA_QUEUE_DEPTH;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ case MPI2_RAID_VOL_TYPE_UNKNOWN:
+ default:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ }
+ } else if (sas_target_priv_data->flags &
+ MPT_TARGET_FLAGS_PCIE_DEVICE)
+ qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
+ else
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
+ }
+ break;
+ case 1:
+ ioc->enable_sdev_max_qd = 1;
+ shost_for_each_device(sdev, ioc->shost)
+ mpt3sas_scsih_change_queue_depth(sdev,
+ shost->can_queue);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(enable_sdev_max_qd);
+
struct device_attribute *mpt3sas_host_attrs[] = {
&dev_attr_version_fw,
&dev_attr_version_bios,
@@ -3374,7 +3526,9 @@ struct device_attribute *mpt3sas_host_attrs[] = {
&dev_attr_diag_trigger_event,
&dev_attr_diag_trigger_scsi,
&dev_attr_diag_trigger_mpi,
+ &dev_attr_drv_support_bitmap,
&dev_attr_BRM_status,
+ &dev_attr_enable_sdev_max_qd,
NULL,
};
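
The policy the new enable_sdev_max_qd attribute toggles, reduced to its
essence: with the override on, every device gets the host's full can_queue;
otherwise the per-type default (SAS/SATA/RAID/NVMe in the driver) applies.
A hypothetical helper, not driver code:

    /* Pick the effective queue depth under the max-qd override. */
    static int effective_qdepth(int enable_max_qd, int can_queue,
                                int type_default)
    {
            return enable_max_qd ? can_queue : type_default;
    }

Toggling the attribute at runtime (typically by writing 0 or 1 to
/sys/class/scsi_host/host<N>/enable_sdev_max_qd) walks every attached sdev
and reapplies this policy, as the store handler above shows.
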
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 717ba0845a2a..d0c2f8d6f2a2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -155,6 +155,10 @@ static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+static bool enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, bool, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd,
+ "Enable sdev max qd as can_queue, def=disabled(0)");
/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
@@ -1152,6 +1156,11 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (pcie_device->access_status ==
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+ return;
+ }
if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
_scsih_pcie_device_remove(ioc, pcie_device);
} else if (!pcie_device->starget) {
@@ -1196,7 +1205,9 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device_get(pcie_device);
list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
- _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
+ if (pcie_device->access_status !=
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
+ _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
@@ -1433,17 +1444,20 @@ _scsih_is_end_device(u32 device_info)
}
/**
- * _scsih_is_nvme_device - determines if device is an nvme device
+ * _scsih_is_nvme_pciescsi_device - determines if the
+ * device is a PCIe NVMe/SCSI device
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Return: 1 if nvme device.
+ * Return: 1 if the PCIe device type is NVMe or SCSI, 0 otherwise.
*/
static int
-_scsih_is_nvme_device(u32 device_info)
+_scsih_is_nvme_pciescsi_device(u32 device_info)
{
- if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
- == MPI26_PCIE_DEVINFO_NVME)
+ if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+ == MPI26_PCIE_DEVINFO_NVME) ||
+ ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+ == MPI26_PCIE_DEVINFO_SCSI))
return 1;
else
return 0;
@@ -1509,7 +1523,13 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
max_depth = shost->can_queue;
- /* limit max device queue for SATA to 32 */
+ /*
+ * limit max device queue for SATA to 32 if enable_sdev_max_qd
+ * is disabled.
+ */
+ if (ioc->enable_sdev_max_qd)
+ goto not_sata;
+
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data)
goto not_sata;
@@ -1539,6 +1559,25 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
}
/**
+ * mpt3sas_scsih_change_queue_depth - set the device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * If enable_sdev_max_qd is set, the requested depth is overridden with
+ * the host's can_queue before being applied.
+ */
+void
+mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if (ioc->enable_sdev_max_qd)
+ qdepth = shost->can_queue;
+
+ scsih_change_queue_depth(sdev, qdepth);
+}
+
+/**
* scsih_target_alloc - target add routine
* @starget: scsi target struct
*
@@ -2296,7 +2335,7 @@ scsih_slave_configure(struct scsi_device *sdev)
MPT3SAS_RAID_MAX_SECTORS);
}
- scsih_change_queue_depth(sdev, qdepth);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
/* raid transport support */
if (!ioc->is_warpdrive)
@@ -2360,7 +2399,7 @@ scsih_slave_configure(struct scsi_device *sdev)
pcie_device_put(pcie_device);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- scsih_change_queue_depth(sdev, qdepth);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
** merged and can eliminate holes created during merging
** operation.
@@ -2420,7 +2459,7 @@ scsih_slave_configure(struct scsi_device *sdev)
_scsih_display_sata_capabilities(ioc, handle, sdev);
- scsih_change_queue_depth(sdev, qdepth);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
if (ssp_target) {
sas_read_port_mode_page(sdev);
@@ -2872,7 +2911,8 @@ scsih_abort(struct scsi_cmnd *scmd)
handle = sas_device_priv_data->sas_target->handle;
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
- if (pcie_device && (!ioc->tm_custom_handling))
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
timeout = ioc->nvme_abort_timeout;
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
@@ -2943,11 +2983,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
- if (pcie_device && (!ioc->tm_custom_handling)) {
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
tr_timeout = pcie_device->reset_timeout;
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
tr_timeout, tr_method);
@@ -3020,7 +3062,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
- if (pcie_device && (!ioc->tm_custom_handling)) {
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
tr_timeout = pcie_device->reset_timeout;
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
@@ -3598,7 +3641,9 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_address = pcie_device->wwid;
}
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- if (pcie_device && (!ioc->tm_custom_handling))
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(
+ pcie_device->device_info))))
tr_method =
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
else
@@ -4654,11 +4699,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
* since we're lockless at this point
*/
do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
} while (_scsih_set_satl_pending(scmd, true));
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
@@ -6456,24 +6498,17 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_sas_device_status_change_event - handle device status change
* @ioc: per adapter object
- * @fw_event: The fw_event_work object
+ * @event_data: The fw event
* Context: user.
*/
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
- struct fw_event_work *fw_event)
+ Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
struct MPT3SAS_TARGET *target_priv_data;
struct _sas_device *sas_device;
u64 sas_address;
unsigned long flags;
- Mpi2EventDataSasDeviceStatusChange_t *event_data =
- (Mpi2EventDataSasDeviceStatusChange_t *)
- fw_event->event_data;
-
- if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
- _scsih_sas_device_status_change_event_debug(ioc,
- event_data);
/* In MPI Revision K (0xC), the internal device reset complete was
* implemented, so avoid setting tm_busy flag for older firmware.
@@ -6505,6 +6540,12 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
else
target_priv_data->tm_busy = 0;
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ ioc_info(ioc,
+ "%s tm_busy flag for handle(0x%04x)\n",
+ (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
+ target_priv_data->handle);
+
out:
if (sas_device)
sas_device_put(sas_device);
@@ -6539,6 +6580,11 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
break;
case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
desc = "PCIe device blocked";
+ ioc_info(ioc,
+ "Device with Access Status (%s): wwid(0x%016llx), handle(0x%04x)\n"
+ "will only be added to the internal list\n",
+ desc, (u64)wwid, handle);
+ rc = 0;
break;
case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
desc = "PCIe device mem space access failed";
@@ -6643,7 +6689,8 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
pcie_device->enclosure_level,
pcie_device->connector_name);
- if (pcie_device->starget)
+ if (pcie_device->starget && (pcie_device->access_status !=
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
scsi_remove_target(&pcie_device->starget->dev);
dewtprintk(ioc,
ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
@@ -6694,7 +6741,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* check if this is end device */
device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
- if (!(_scsih_is_nvme_device(device_info)))
+ if (!(_scsih_is_nvme_pciescsi_device(device_info)))
return;
wwid = le64_to_cpu(pcie_device_pg0.WWID);
@@ -6709,6 +6756,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (unlikely(pcie_device->handle != handle)) {
starget = pcie_device->starget;
sas_target_priv_data = starget->hostdata;
+ pcie_device->access_status = pcie_device_pg0.AccessStatus;
starget_printk(KERN_INFO, starget,
"handle changed from(0x%04x) to (0x%04x)!!!\n",
pcie_device->handle, handle);
@@ -6803,7 +6851,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device_pg0.AccessStatus))
return 0;
- if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
+ if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
+ (pcie_device_pg0.DeviceInfo))))
return 0;
pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
@@ -6813,6 +6862,31 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
return 0;
}
+ /* PCIe Device Page 2 contains read-only information about a
+ * specific NVMe device; therefore, this page is valid only for
+ * NVMe devices and is skipped for PCIe devices of type SCSI.
+ */
+ if (!(mpt3sas_scsih_is_pcie_scsi_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+ if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
+ &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle)) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 0;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 0;
+ }
+ }
+
pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
if (!pcie_device) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -6824,6 +6898,7 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device->id = ioc->pcie_target_id++;
pcie_device->channel = PCIE_CHANNEL;
pcie_device->handle = handle;
+ pcie_device->access_status = pcie_device_pg0.AccessStatus;
pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
pcie_device->wwid = wwid;
pcie_device->port_num = pcie_device_pg0.PortNum;
@@ -6855,27 +6930,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
}
/* TODO -- Add device name once FW supports it */
- if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
- &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
- ioc_err(ioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- kfree(pcie_device);
- return 0;
- }
-
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
- if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- ioc_err(ioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- kfree(pcie_device);
- return 0;
- }
- pcie_device->nvme_mdts =
- le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
- if (pcie_device_pg2.ControllerResetTO)
- pcie_device->reset_timeout =
- pcie_device_pg2.ControllerResetTO;
- else
+ if (!(mpt3sas_scsih_is_pcie_scsi_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+ pcie_device->nvme_mdts =
+ le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+ if (pcie_device_pg2.ControllerResetTO)
+ pcie_device->reset_timeout =
+ pcie_device_pg2.ControllerResetTO;
+ else
+ pcie_device->reset_timeout = 30;
+ } else
pcie_device->reset_timeout = 30;
if (ioc->wait_for_discovery_to_complete)
@@ -8507,6 +8571,8 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
&& (pcie_device->slot == le16_to_cpu(
pcie_device_pg0->Slot))) {
+ pcie_device->access_status =
+ pcie_device_pg0->AccessStatus;
pcie_device->responding = 1;
starget = pcie_device->starget;
if (starget && starget->hostdata) {
@@ -8594,7 +8660,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
- if (!(_scsih_is_nvme_device(device_info)))
+ if (!(_scsih_is_nvme_pciescsi_device(device_info)))
continue;
_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
}
@@ -9175,7 +9241,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
break;
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
- if (!(_scsih_is_nvme_device(
+ if (!(_scsih_is_nvme_pciescsi_device(
le32_to_cpu(pcie_device_pg0.DeviceInfo))))
continue;
pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
@@ -9308,7 +9374,10 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
_scsih_sas_topology_change_event(ioc, fw_event);
break;
case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
- _scsih_sas_device_status_change_event(ioc, fw_event);
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_device_status_change_event_debug(ioc,
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ fw_event->event_data);
break;
case MPI2_EVENT_SAS_DISCOVERY:
_scsih_sas_discovery_event(ioc, fw_event);
@@ -9481,6 +9550,10 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
break;
}
case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ _scsih_sas_device_status_change_event(ioc,
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ mpi_reply->EventData);
+ break;
case MPI2_EVENT_IR_OPERATION_STATUS:
case MPI2_EVENT_SAS_DISCOVERY:
case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
@@ -10039,6 +10112,12 @@ _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
pcie_device_put(pcie_device);
continue;
}
+ if (pcie_device->access_status ==
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
+ pcie_device_make_active(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ }
rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
pcie_device->id, 0);
if (rc) {
@@ -10453,6 +10532,13 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
ioc->logging_level = logging_level;
ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /*
+ * Enable MEMORY MOVE support flag.
+ */
+ ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
+
+ ioc->enable_sdev_max_qd = enable_sdev_max_qd;
+
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
/* initializing pci_access_mutex lock */
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index e6a95498ac0d..e0b427fdf818 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -3910,11 +3910,14 @@ static void __init ncr_prepare_setting(struct ncb *np)
np->scsi_mode = SMODE_HVD;
break;
}
+ /* fall through */
case 3: /* SYMBIOS controllers report HVD through GPIO3 */
if (INB(nc_gpreg) & 0x08)
break;
+ /* fall through */
case 2: /* Set HVD unconditionally */
np->scsi_mode = SMODE_HVD;
+ /* fall through */
case 1: /* Trust previous settings for HVD */
if (np->sv_stest2 & 0x20)
np->scsi_mode = SMODE_HVD;
@@ -6714,6 +6717,7 @@ void ncr_int_sir (struct ncb *np)
OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0]));
return;
}
+ /* fall through */
case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */
case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */
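
The annotations added above mark deliberate drop-throughs so that
-Wimplicit-fallthrough stays quiet on each cascading case. A self-contained
sketch of the same style, loosely mirroring the HVD detection logic
(constants and names are illustrative only):

    /* Cascading switch with annotated fall-throughs: higher cases
     * deliberately fall into lower ones.
     */
    static int scsi_mode_is_hvd(int diff_mode, unsigned int stest2)
    {
            int hvd = 0;

            switch (diff_mode) {
            case 2:                 /* set HVD unconditionally */
                    hvd = 1;
                    /* fall through */
            case 1:                 /* trust previously saved settings */
                    if (stest2 & 0x20)
                            hvd = 1;
                    break;
            default:
                    break;
            }
            return hvd;
    }
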
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 9453705f643a..7e48154e11c3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -1308,28 +1308,22 @@ out:
int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
{
- int rc = TMF_RESP_FUNC_FAILED;
struct pm8001_tmf_task tmf_task;
tmf_task.tmf = TMF_ABORT_TASK_SET;
- rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
- return rc;
+ return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
{
- int rc = TMF_RESP_FUNC_FAILED;
struct pm8001_tmf_task tmf_task;
tmf_task.tmf = TMF_CLEAR_ACA;
- rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
+ return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
- int rc = TMF_RESP_FUNC_FAILED;
struct pm8001_tmf_task tmf_task;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
@@ -1338,7 +1332,6 @@ int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
pm8001_printk("I_T_L_Q clear task set[%x]\n",
pm8001_dev->device_id));
tmf_task.tmf = TMF_CLEAR_TASK_SET;
- rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
- return rc;
+ return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 71ff3936da4f..398d2af60832 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5841,7 +5841,7 @@ out_disable_device:
}
/*
- * PCI driver structure of pcmraid driver
+ * PCI driver structure of pmcraid driver
*/
static struct pci_driver pmcraid_driver = {
.name = PMCRAID_DRIVER_NAME,
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 5a021217bfc9..f3f399fe10c8 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -49,6 +49,7 @@
#define QEDF_ABORT_TIMEOUT (10 * 1000)
#define QEDF_CLEANUP_TIMEOUT 1
#define QEDF_MAX_CDB_LEN 16
+#define QEDF_LL2_BUF_SIZE 2500 /* Buffer size required for LL2 Rx */
#define UPSTREAM_REMOVE 1
#define UPSTREAM_KEEP 1
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index d905a307302d..b88bed9bb133 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -47,13 +47,13 @@ qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
* @pf: the pf that is stopping
**/
void
-qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf)
+qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf_dbg)
{
- QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
+ QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
"entry\n");
/* remove debugfs entries of this PF */
- debugfs_remove_recursive(qedf->bdf_dentry);
- qedf->bdf_dentry = NULL;
+ debugfs_remove_recursive(qedf_dbg->bdf_dentry);
+ qedf_dbg->bdf_dentry = NULL;
}
/**
@@ -140,10 +140,10 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
int cnt;
- struct qedf_dbg_ctx *qedf =
+ struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
- QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n");
+ QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
cnt = min_t(int, count, cnt - *ppos);
@@ -158,7 +158,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
uint32_t val;
void *kern_buf;
int rval;
- struct qedf_dbg_ctx *qedf =
+ struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
if (!count || *ppos)
@@ -178,7 +178,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
else
qedf_debug = val;
- QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
+ QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
return count;
}
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 5996f68fbf2b..87e169dcebdb 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -179,8 +179,11 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
orig_io_req = cb_arg->aborted_io_req;
- if (!orig_io_req)
+ if (!orig_io_req) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
goto out_free;
+ }
if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
@@ -350,8 +353,10 @@ void qedf_restart_rport(struct qedf_rport *fcport)
u32 port_id;
unsigned long flags;
- if (!fcport)
+ if (!fcport) {
+ QEDF_ERR(NULL, "fcport is NULL.\n");
return;
+ }
spin_lock_irqsave(&fcport->rport_lock, flags);
if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
@@ -418,8 +423,11 @@ static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
* If we are flushing the command just free the cb_arg as none of the
* response data will be valid.
*/
- if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
+ if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) {
+ QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n",
+ els_req->xid);
goto free_arg;
+ }
fcport = els_req->fcport;
mp_req = &(els_req->mp_req);
@@ -532,8 +540,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
orig_io_req = cb_arg->aborted_io_req;
- if (!orig_io_req)
+ if (!orig_io_req) {
+ QEDF_ERR(NULL, "orig_io_req is NULL.\n");
goto out_free;
+ }
clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
@@ -547,8 +557,11 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
orig_io_req, orig_io_req->xid, srr_req->xid, refcount);
/* If a SRR times out, simply free resources */
- if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
+ if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "ELS timeout srr_xid=0x%x.\n", srr_req->xid);
goto out_put;
+ }
/* Normalize response data into struct fc_frame */
mp_req = &(srr_req->mp_req);
@@ -721,8 +734,11 @@ void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
cb_arg = io_req->cb_arg;
/* If we timed out just free resources */
- if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
+ if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "cqe is NULL or timeout event (0x%x)\n", io_req->event);
goto free;
+ }
/* Kill the timer we put on the request */
cancel_delayed_work_sync(&io_req->timeout_work);
@@ -825,8 +841,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
orig_io_req = cb_arg->aborted_io_req;
- if (!orig_io_req)
+ if (!orig_io_req) {
+ QEDF_ERR(NULL, "orig_io_req is NULL.\n");
goto out_free;
+ }
if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
@@ -838,8 +856,12 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
orig_io_req, orig_io_req->xid, rec_req->xid, refcount);
/* If a REC times out, free resources */
- if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
+ if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n",
+ orig_io_req, orig_io_req->xid);
goto out_put;
+ }
/* Normalize response data into struct fc_frame */
mp_req = &(rec_req->mp_req);
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 362d2bed72fb..bb82f0875eca 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -23,8 +23,11 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
int rc = -1;
skb = dev_alloc_skb(sizeof(struct fip_vlan));
- if (!skb)
+ if (!skb) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Failed to allocate skb.\n");
return;
+ }
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
@@ -250,18 +253,24 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
fc_wwpn_valid = true;
break;
case FIP_DT_VN_ID:
+ fabric_id_valid = false;
vp = (struct fip_vn_desc *)desc;
- QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
- "vx_port fd_fc_id=%x fd_mac=%pM.\n",
- ntoh24(vp->fd_fc_id), vp->fd_mac);
- /* Check vx_port fabric ID */
- if (ntoh24(vp->fd_fc_id) !=
- qedf->lport->port_id)
- fabric_id_valid = false;
- /* Check vx_port MAC */
- if (!ether_addr_equal(vp->fd_mac,
- qedf->data_src_addr))
- fabric_id_valid = false;
+
+ QEDF_ERR(&qedf->dbg_ctx,
+ "CVL vx_port fd_fc_id=0x%x fd_mac=%pM fd_wwpn=%016llx.\n",
+ ntoh24(vp->fd_fc_id), vp->fd_mac,
+ get_unaligned_be64(&vp->fd_wwpn));
+ /* Treat the CVL as addressed to us if the vx_port WWPN,
+ * fabric ID, or MAC matches ours.
+ */
+ if ((get_unaligned_be64(&vp->fd_wwpn) ==
+ qedf->wwpn) ||
+ (ntoh24(vp->fd_fc_id) ==
+ qedf->lport->port_id) ||
+ (ether_addr_equal(vp->fd_mac,
+ qedf->data_src_addr))) {
+ fabric_id_valid = true;
+ }
break;
default:
/* Ignore anything else */
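
The change above relaxes the clear-virtual-link (CVL) check from requiring
every vx_port field to match into accepting a match on any one of them. The
predicate in isolation, with illustrative stand-ins for the qedf fields:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* CVL is considered addressed to this port if ANY of the vx_port
     * descriptor fields (WWPN, fabric ID, or MAC) matches ours.
     */
    static bool cvl_matches_port(uint64_t fd_wwpn, uint32_t fd_fc_id,
                                 const uint8_t fd_mac[6],
                                 uint64_t my_wwpn, uint32_t my_port_id,
                                 const uint8_t my_mac[6])
    {
            return fd_wwpn == my_wwpn ||
                   fd_fc_id == my_port_id ||
                   memcmp(fd_mac, my_mac, 6) == 0;
    }
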
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index d881e822f92c..e749a2dcaad7 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -104,6 +104,8 @@ static void qedf_cmd_timeout(struct work_struct *work)
qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
break;
default:
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Hit default case, xid=0x%x.\n", io_req->xid);
break;
}
}
@@ -122,8 +124,10 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
num_ios = max_xid - min_xid + 1;
/* Free fcoe_bdt_ctx structures */
- if (!cmgr->io_bdt_pool)
+ if (!cmgr->io_bdt_pool) {
+ QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
goto free_cmd_pool;
+ }
bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
for (i = 0; i < num_ios; i++) {
@@ -226,8 +230,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
GFP_KERNEL);
- if (!io_req->sense_buffer)
+ if (!io_req->sense_buffer) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Failed to alloc sense buffer.\n");
goto mem_err;
+ }
/* Allocate task parameters to pass to f/w init funcions */
io_req->task_params = kzalloc(sizeof(*io_req->task_params),
@@ -437,8 +444,12 @@ void qedf_release_cmd(struct kref *ref)
struct qedf_rport *fcport = io_req->fcport;
unsigned long flags;
- if (io_req->cmd_type == QEDF_SCSI_CMD)
+ if (io_req->cmd_type == QEDF_SCSI_CMD) {
+ QEDF_WARN(&fcport->qedf->dbg_ctx,
+ "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
+ io_req, io_req->xid);
WARN_ON(io_req->sc_cmd);
+ }
if (io_req->cmd_type == QEDF_ELS ||
io_req->cmd_type == QEDF_TASK_MGMT_CMD)
@@ -447,8 +458,10 @@ void qedf_release_cmd(struct kref *ref)
atomic_inc(&cmd_mgr->free_list_cnt);
atomic_dec(&fcport->num_active_ios);
atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
- if (atomic_read(&fcport->num_active_ios) < 0)
+ if (atomic_read(&fcport->num_active_ios) < 0) {
QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
+ WARN_ON(1);
+ }
/* Increment task retry identifier now that the request is released */
io_req->task_retry_identifier++;
@@ -951,6 +964,9 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Returning DID_NO_CONNECT as unloading or stop io, flags 0x%lx.\n",
+ qedf->flags);
sc_cmd->result = DID_NO_CONNECT << 16;
sc_cmd->scsi_done(sc_cmd);
return 0;
@@ -967,6 +983,9 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
rval = fc_remote_port_chkready(rport);
if (rval) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
+ rval, rport->port_id);
sc_cmd->result = rval;
sc_cmd->scsi_done(sc_cmd);
return 0;
@@ -974,12 +993,14 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
/* Retry command if we are doing a qed drain operation */
if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
rc = SCSI_MLQUEUE_HOST_BUSY;
goto exit_qcmd;
}
if (lport->state != LPORT_ST_READY ||
atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
rc = SCSI_MLQUEUE_HOST_BUSY;
goto exit_qcmd;
}
@@ -1297,8 +1318,10 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
struct scsi_cmnd *sc_cmd;
int refcount;
- if (!io_req)
+ if (!io_req) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
return;
+ }
if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
@@ -1414,8 +1437,12 @@ void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
u64 err_warn_bit_map;
u8 err_warn = 0xff;
- if (!cqe)
+ if (!cqe) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "cqe is NULL for io_req %p xid=0x%x\n",
+ io_req, io_req->xid);
return;
+ }
QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
"xid=0x%x\n", io_req->xid);
@@ -1477,8 +1504,11 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
{
int rval;
- if (!cqe)
+ if (!cqe) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "cqe is NULL for io_req %p\n", io_req);
return;
+ }
QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
"xid=0x%x\n", io_req->xid);
@@ -1543,8 +1573,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
int wait_cnt = 100;
int refcount = 0;
- if (!fcport)
+ if (!fcport) {
+ QEDF_ERR(NULL, "fcport is NULL\n");
return;
+ }
/* Check that fcport is still offloaded */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
@@ -1976,6 +2008,10 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
if (io_req->sc_cmd) {
+ if (!io_req->return_scsi_cmd_on_abts)
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+ "Not calling scsi_done for xid=0x%x.\n",
+ io_req->xid);
if (io_req->return_scsi_cmd_on_abts)
qedf_scsi_done(qedf, io_req, DID_ERROR);
}
@@ -2201,6 +2237,10 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
}
if (io_req->sc_cmd) {
+ if (!io_req->return_scsi_cmd_on_abts)
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
+ "Not calling scsi_done for xid=0x%x.\n",
+ io_req->xid);
if (io_req->return_scsi_cmd_on_abts)
qedf_scsi_done(qedf, io_req, DID_ERROR);
}
@@ -2241,7 +2281,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
u16 sqe_idx;
if (!sc_cmd) {
- QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
+ QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
return FAILED;
}
@@ -2363,8 +2403,8 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
QEDF_ERR(NULL,
"tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
- tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
- (int)sc_cmd->device->lun);
+ tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
+ rport->scsi_target_id, (int)sc_cmd->device->lun);
if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
QEDF_ERR(NULL, "stale rport\n");
@@ -2515,6 +2555,11 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
fh = (struct fc_frame_header *)fc_frame_header_get(fp);
memcpy(fh, (void *)bdq_addr, pktlen);
+ QEDF_WARN(&qedf->dbg_ctx,
+ "Processing Unsolicated frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
+ ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
+ fh->fh_type, fc_frame_payload_op(fp));
+
/* Initialize the frame so libfc sees it as a valid frame */
crc = fcoe_fc_crc(fp);
fc_frame_init(fp);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 42542720962f..1659d35cd37b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -27,6 +27,7 @@ const struct qed_fcoe_ops *qed_ops;
static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void qedf_remove(struct pci_dev *pdev);
+static void qedf_shutdown(struct pci_dev *pdev);
/*
* Driver module parameters.
@@ -110,16 +111,18 @@ static struct kmem_cache *qedf_io_work_cache;
void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
{
- qedf->vlan_id = vlan_id;
- qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT;
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
- "prio=%d.\n", vlan_id, qedf->prio);
+ int vlan_id_tmp = 0;
+
+ vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
+ qedf->vlan_id = vlan_id_tmp;
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Setting vlan_id=0x%04x prio=%d.\n",
+ vlan_id_tmp, qedf->prio);
}
/* Returns true if we have a valid vlan, false otherwise */
static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
{
- int rc;
while (qedf->fipvlan_retries--) {
/* This is to catch if link goes down during fipvlan retries */
@@ -128,20 +131,25 @@ static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
return false;
}
- if (qedf->vlan_id > 0)
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
+ return false;
+ }
+
+ if (qedf->vlan_id > 0) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "vlan = 0x%x already set, calling ctlr_link_up.\n",
+ qedf->vlan_id);
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
+ fcoe_ctlr_link_up(&qedf->ctlr);
return true;
+ }
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"Retry %d.\n", qedf->fipvlan_retries);
init_completion(&qedf->fipvlan_compl);
qedf_fcoe_send_vlan_req(qedf);
- rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
- 1 * HZ);
- if (rc > 0 &&
- (atomic_read(&qedf->link_state) == QEDF_LINK_UP)) {
- fcoe_ctlr_link_up(&qedf->ctlr);
- return true;
- }
+ wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
}
return false;
@@ -162,6 +170,8 @@ static void qedf_handle_link_update(struct work_struct *work)
return;
if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
+ "Link is down, resetting vlan_id.\n");
qedf->vlan_id = 0;
return;
}
@@ -311,8 +321,10 @@ int qedf_send_flogi(struct qedf_ctx *qedf)
lport = qedf->lport;
- if (!lport->tt.elsct_send)
+ if (!lport->tt.elsct_send) {
+ QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
return -EINVAL;
+ }
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp) {
@@ -330,11 +342,6 @@ int qedf_send_flogi(struct qedf_ctx *qedf)
return 0;
}
-struct qedf_tmp_rdata_item {
- struct fc_rport_priv *rdata;
- struct list_head list;
-};
-
/*
* This function is called if link_down_tmo is in use. If we get a link up and
* link_down_tmo has not expired then use just FLOGI/ADISC to recover our
@@ -344,9 +351,8 @@ static void qedf_link_recovery(struct work_struct *work)
{
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, link_recovery.work);
- struct qedf_rport *fcport;
+ struct fc_lport *lport = qedf->lport;
struct fc_rport_priv *rdata;
- struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
bool rc;
int retries = 30;
int rval, i;
@@ -413,33 +419,14 @@ static void qedf_link_recovery(struct work_struct *work)
* Call lport->tt.rport_login which will cause libfc to send an
* ADISC since the rport is in state ready.
*/
- rcu_read_lock();
- list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
- rdata = fcport->rdata;
- if (rdata == NULL)
- continue;
- rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
- GFP_ATOMIC);
- if (!rdata_item)
- continue;
+ mutex_lock(&lport->disc.disc_mutex);
+ list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
- rdata_item->rdata = rdata;
- list_add(&rdata_item->list, &rdata_login_list);
- } else
- kfree(rdata_item);
- }
- rcu_read_unlock();
- /*
- * Do the fc_rport_login outside of the rcu lock so we don't take a
- * mutex in an atomic context.
- */
- list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
- list) {
- list_del(&rdata_item->list);
- fc_rport_login(rdata_item->rdata);
- kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
- kfree(rdata_item);
+ fc_rport_login(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
}
+ mutex_unlock(&lport->disc.disc_mutex);
}
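
The rewritten recovery path walks lport->disc.rports under disc_mutex and
logs in only those rports whose refcount can still be raised, dropping the
reference immediately after use. A user-space analog of the
kref_get_unless_zero() step, the piece that makes the walk safe against
concurrent teardown (C11 atomics; illustrative, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if the object is not already being torn down
     * (i.e. the refcount has not reached zero).
     */
    static bool ref_get_unless_zero(atomic_int *refs)
    {
            int old = atomic_load(refs);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(refs, &old, old + 1))
                            return true;
            }
            return false;
    }
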
static void qedf_update_link_speed(struct qedf_ctx *qedf,
@@ -467,6 +454,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
case 100000:
lport->link_speed = FC_PORTSPEED_100GBIT;
break;
+ case 20000:
+ lport->link_speed = FC_PORTSPEED_20GBIT;
+ break;
default:
lport->link_speed = FC_PORTSPEED_UNKNOWN;
break;
@@ -476,16 +466,40 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
* Set supported link speed by querying the supported
* capabilities of the link.
*/
- if (link->supported_caps & SUPPORTED_10000baseKR_Full)
+ if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) ||
+ (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) ||
+ (link->supported_caps & QED_LM_10000baseR_FEC_BIT) ||
+ (link->supported_caps & QED_LM_10000baseCR_Full_BIT) ||
+ (link->supported_caps & QED_LM_10000baseSR_Full_BIT) ||
+ (link->supported_caps & QED_LM_10000baseLR_Full_BIT) ||
+ (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) ||
+ (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) {
lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
- if (link->supported_caps & SUPPORTED_25000baseKR_Full)
+ }
+ if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) ||
+ (link->supported_caps & QED_LM_25000baseCR_Full_BIT) ||
+ (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) {
lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
- if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
+ }
+ if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) ||
+ (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) ||
+ (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) ||
+ (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) {
lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
- if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
+ }
+ if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) ||
+ (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) ||
+ (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) {
lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
- if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
+ }
+ if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) ||
+ (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) ||
+ (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) ||
+ (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) {
lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
+ }
+ if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT)
+ lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
}
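
The if-chains above grow with every capability bit; an alternative,
table-driven form of the same capability-to-port-speed mapping is sketched
below. The bit values are purely illustrative, not the real QED_LM_* or
FC_PORTSPEED_* constants:

    #include <stdint.h>

    struct caps_map {
            uint32_t cap_mask;      /* any of these capability bits ...  */
            uint32_t portspeed;     /* ... advertises this port speed    */
    };

    static uint32_t supported_speeds(uint32_t caps,
                                     const struct caps_map *map, int n)
    {
            uint32_t speeds = 0;
            int i;

            for (i = 0; i < n; i++)
                    if (caps & map[i].cap_mask)
                            speeds |= map[i].portspeed;
            return speeds;
    }
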
@@ -493,6 +507,16 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
{
struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+ /*
+ * Prevent race where we're removing the module and we get link update
+ * for qed.
+ */
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Ignoring link update; driver is unloading.\n");
+ return;
+ }
+
if (link->link_up) {
if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
@@ -2340,12 +2364,14 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
fr_dev(fp) = lport;
fr_sof(fp) = hp->fcoe_sof;
if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n");
kfree_skb(skb);
return;
}
fr_eof(fp) = crc_eof.fcoe_eof;
fr_crc(fp) = crc_eof.fcoe_crc32;
if (pskb_trim(skb, fr_len)) {
+ QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n");
kfree_skb(skb);
return;
}
@@ -2406,9 +2432,9 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
* empty then this is not addressed to our port so simply drop it.
*/
if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
- lport->port_id, ntoh24(fh->fh_d_id));
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+ "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n",
+ lport->port_id, ntoh24(fh->fh_d_id));
kfree_skb(skb);
return;
}
@@ -2417,6 +2443,8 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
(f_ctl & FC_FC_EX_CTX)) {
/* Drop incoming ABTS response that has both SEQ/EX CTX set */
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
+ "Dropping ABTS response as both SEQ/EX CTX set.\n");
kfree_skb(skb);
return;
}
@@ -2560,8 +2588,9 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
- QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
- "for id = %d.\n", sb_id);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Status block allocation failed for id = %d.\n",
+ sb_id);
return -ENOMEM;
}
@@ -2569,8 +2598,9 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
sb_id, QED_SB_TYPE_STORAGE);
if (ret) {
- QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
- "failed for id = %d.\n", sb_id);
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Status block initialization failed (0x%x) for id = %d.\n",
+ ret, sb_id);
return ret;
}
@@ -2654,13 +2684,18 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
io_req = &qedf->cmd_mgr->cmds[xid];
/* Completion not for a valid I/O anymore so just return */
- if (!io_req)
+ if (!io_req) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "io_req is NULL for xid=0x%x.\n", xid);
return;
+ }
fcport = io_req->fcport;
if (fcport == NULL) {
- QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
+ QEDF_ERR(&qedf->dbg_ctx,
+ "fcport is NULL for xid=0x%x io_req=%p.\n",
+ xid, io_req);
return;
}
@@ -2669,7 +2704,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
* isn't valid and shouldn't be taken. We should just return.
*/
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Session not offloaded yet, fcport = %p.\n", fcport);
return;
}
@@ -2881,6 +2917,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
*/
if (!qedf->p_cpuq) {
status = 1;
+ QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
goto mem_alloc_failure;
}
@@ -2896,8 +2933,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
/* Allocate DMA coherent buffers for BDQ */
rc = qedf_alloc_bdq(qedf);
- if (rc)
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
goto mem_alloc_failure;
+ }
/* Allocate a CQ and an associated PBL for each MSI-X vector */
for (i = 0; i < qedf->num_queues; i++) {
@@ -3107,6 +3146,7 @@ static struct pci_driver qedf_pci_driver = {
.id_table = qedf_pci_tbl,
.probe = qedf_probe,
.remove = qedf_remove,
+ .shutdown = qedf_shutdown,
};
static int __qedf_probe(struct pci_dev *pdev, int mode)
@@ -3209,6 +3249,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qed_params.is_vf = is_vf;
qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
if (!qedf->cdev) {
+ QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
rc = -ENODEV;
goto err1;
}
@@ -3277,8 +3318,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
/* Setup interrupts */
rc = qedf_setup_int(qedf);
- if (rc)
+ if (rc) {
+ QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
goto err3;
+ }
rc = qed_ops->start(qedf->cdev, &qedf->tasks);
if (rc) {
@@ -3360,7 +3403,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
}
memset(&params, 0, sizeof(params));
- params.mtu = 9000;
+ params.mtu = QEDF_LL2_BUF_SIZE;
ether_addr_copy(params.ll2_mac_address, qedf->mac);
/* Start LL2 processing thread */
@@ -3719,6 +3762,11 @@ void qedf_get_protocol_tlv_data(void *dev, void *data)
fcoe->scsi_tsk_full = qedf->task_set_fulls;
}
+static void qedf_shutdown(struct pci_dev *pdev)
+{
+ __qedf_remove(pdev, QEDF_MODE_NORMAL);
+}
+
/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
@@ -3845,7 +3893,7 @@ static void __exit qedf_cleanup(void)
}
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDF_VERSION);
module_init(qedf_init);
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index e57533de7e96..b0e37afe5bbb 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -4,9 +4,9 @@
* Copyright (c) 2016-2018 Cavium Inc.
*/
-#define QEDF_VERSION "8.37.25.20"
+#define QEDF_VERSION "8.42.3.0"
#define QEDF_DRIVER_MAJOR_VER 8
-#define QEDF_DRIVER_MINOR_VER 37
-#define QEDF_DRIVER_REV_VER 25
-#define QEDF_DRIVER_ENG_VER 20
+#define QEDF_DRIVER_MINOR_VER 42
+#define QEDF_DRIVER_REV_VER 3
+#define QEDF_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6b7b390b2e52..8190c2a27584 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -382,7 +382,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_region_size = size;
ha->optrom_state = QLA_SREADING;
- ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ ha->optrom_buffer = vzalloc(ha->optrom_region_size);
if (ha->optrom_buffer == NULL) {
ql_log(ql_log_warn, vha, 0x7062,
"Unable to allocate memory for optrom retrieval "
@@ -404,7 +404,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"Reading flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size);
- memset(ha->optrom_buffer, 0, ha->optrom_region_size);
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
break;
@@ -457,7 +456,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_region_size = size;
ha->optrom_state = QLA_SWRITING;
- ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ ha->optrom_buffer = vzalloc(ha->optrom_region_size);
if (ha->optrom_buffer == NULL) {
ql_log(ql_log_warn, vha, 0x7066,
"Unable to allocate memory for optrom update "
@@ -472,7 +471,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
"Staging flash region write -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size);
- memset(ha->optrom_buffer, 0, ha->optrom_region_size);
break;
case 3:
if (ha->optrom_state != QLA_SWRITING) {
@@ -726,7 +724,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
break;
} else {
/* Make sure FC side is not in reset */
- qla2x00_wait_for_hba_online(vha);
+ WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
+ QLA_SUCCESS);
/* Issue MPI reset */
scsi_block_requests(vha->host);
@@ -1126,7 +1125,8 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
char pci_info[30];
return scnprintf(buf, PAGE_SIZE, "%s\n",
- vha->hw->isp_ops->pci_info_str(vha, pci_info));
+ vha->hw->isp_ops->pci_info_str(vha, pci_info,
+ sizeof(pci_info)));
}
static ssize_t
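
The vzalloc() conversions above fold the later memset() into the allocation, and pci_info_str() now receives the destination buffer's size so implementations can bound their writes. A sketch of the allocation change (illustrative helpers, not qla2xxx code):

    #include <linux/string.h>
    #include <linux/vmalloc.h>

    static void *alloc_region_old(size_t size)
    {
            /* before: allocate, then zero in a separate step */
            void *buf = vmalloc(size);

            if (buf)
                    memset(buf, 0, size);
            return buf;
    }

    static void *alloc_region_new(size_t size)
    {
            /* after: vzalloc() returns already-zeroed memory */
            return vzalloc(size);
    }
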
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 5441557b424b..28d587a89ba6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -12,10 +12,8 @@
#include <linux/bsg-lib.h>
/* BSG support for ELS/CT pass through */
-void
-qla2x00_bsg_job_done(void *ptr, int res)
+void qla2x00_bsg_job_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
@@ -25,10 +23,8 @@ qla2x00_bsg_job_done(void *ptr, int res)
sp->free(sp);
}
-void
-qla2x00_bsg_sp_free(void *ptr)
+void qla2x00_bsg_sp_free(srb_t *sp)
{
- srb_t *sp = ptr;
struct qla_hw_data *ha = sp->vha->hw;
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_request *bsg_request = bsg_job->request;
@@ -341,6 +337,8 @@ qla2x00_process_els(struct bsg_job *bsg_job)
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
rval = -ENOMEM;
goto done_free_fcport;
}
@@ -348,6 +346,8 @@ qla2x00_process_els(struct bsg_job *bsg_job)
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
+ dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
rval = -ENOMEM;
goto done_free_fcport;
}
@@ -1778,8 +1778,8 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
uint16_t nextlid = 0;
uint32_t tot_dsds;
srb_t *sp = NULL;
- uint32_t req_data_len = 0;
- uint32_t rsp_data_len = 0;
+ uint32_t req_data_len;
+ uint32_t rsp_data_len;
/* Check the type of the adapter */
if (!IS_BIDI_CAPABLE(ha)) {
@@ -1884,6 +1884,9 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
goto done_unmap_sg;
}
+ req_data_len = bsg_job->request_payload.payload_len;
+ rsp_data_len = bsg_job->reply_payload.payload_len;
+
if (req_data_len != rsp_data_len) {
rval = EXT_STATUS_BUSY;
ql_log(ql_log_warn, vha, 0x70aa,
@@ -1891,10 +1894,6 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
goto done_unmap_sg;
}
- req_data_len = bsg_job->request_payload.payload_len;
- rsp_data_len = bsg_job->reply_payload.payload_len;
-
-
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
if (!sp) {
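
Several hunks in this series retype the srb completion callbacks from void * to srb_t *, dropping the cast boilerplate at the top of every implementation and letting the compiler check callers. A minimal sketch of the before/after, with illustrative names:

    /* Sketch only; request_blk stands in for srb_t. */
    struct request_blk { int handle; };

    /* before: untyped pointer, every callback re-casts */
    typedef void (*done_fn_old)(void *ptr, int res);

    /* after: the prototype carries the real type */
    typedef void (*done_fn_new)(struct request_blk *sp, int res);

    static void sample_done(struct request_blk *sp, int res)
    {
            /* no 'srb_t *sp = ptr;' line needed anymore */
            (void)sp;
            (void)res;
    }
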
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9e80646722e2..30afc59c1870 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2743,7 +2743,8 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
void
-ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, void *buf, uint size)
+ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
+ uint size)
{
uint cnt;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index bad2b12604f1..873a6aef1c5c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -34,6 +34,20 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
+/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
+typedef struct {
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+} be_id_t;
+
+/* Little endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
+typedef struct {
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+} le_id_t;
+
#include "qla_bsg.h"
#include "qla_dsd.h"
#include "qla_nx.h"
@@ -117,9 +131,9 @@
#define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr)
#define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr)
#define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr)
-#define WRT_REG_BYTE(addr, data) writeb(data,addr)
-#define WRT_REG_WORD(addr, data) writew(data,addr)
-#define WRT_REG_DWORD(addr, data) writel(data,addr)
+#define WRT_REG_BYTE(addr, data) writeb(data, addr)
+#define WRT_REG_WORD(addr, data) writew(data, addr)
+#define WRT_REG_DWORD(addr, data) writel(data, addr)
/*
* ISP83XX specific remote register addresses
@@ -207,7 +221,7 @@
* 133Mhz slot.
*/
#define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr))
-#define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr))
+#define WRT_REG_WORD_PIO(addr, data) (outw(data, (unsigned long)addr))
/*
* Fibre Channel device definitions.
@@ -303,7 +317,8 @@ struct srb_cmd {
uint32_t request_sense_length;
uint32_t fw_sense_length;
uint8_t *request_sense_ptr;
- void *ctx;
+ struct ct6_dsd *ct6_ctx;
+ struct crc_context *crc_ctx;
};
/*
@@ -343,6 +358,51 @@ typedef union {
} port_id_t;
#define INVALID_PORT_ID 0xFFFFFF
+static inline le_id_t be_id_to_le(be_id_t id)
+{
+ le_id_t res;
+
+ res.domain = id.domain;
+ res.area = id.area;
+ res.al_pa = id.al_pa;
+
+ return res;
+}
+
+static inline be_id_t le_id_to_be(le_id_t id)
+{
+ be_id_t res;
+
+ res.domain = id.domain;
+ res.area = id.area;
+ res.al_pa = id.al_pa;
+
+ return res;
+}
+
+static inline port_id_t be_to_port_id(be_id_t id)
+{
+ port_id_t res;
+
+ res.b.domain = id.domain;
+ res.b.area = id.area;
+ res.b.al_pa = id.al_pa;
+ res.b.rsvd_1 = 0;
+
+ return res;
+}
+
+static inline be_id_t port_id_to_be_id(port_id_t port_id)
+{
+ be_id_t res;
+
+ res.domain = port_id.b.domain;
+ res.area = port_id.b.area;
+ res.al_pa = port_id.b.al_pa;
+
+ return res;
+}
+
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
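
The two typedefs above encode the same 24-bit FC address in opposite byte orders, so the conversion helpers can copy field by field and let the struct layouts do the swapping. A self-contained userspace sketch of the idea:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint8_t domain, area, al_pa; } be_id; /* wire order */
    typedef struct { uint8_t al_pa, area, domain; } le_id; /* ISP order */

    static le_id be_to_le(be_id id)
    {
            /* field-wise copy: the byte swap falls out of the layouts */
            return (le_id){ .domain = id.domain, .area = id.area,
                            .al_pa = id.al_pa };
    }

    int main(void)
    {
            be_id b = { .domain = 0x01, .area = 0x02, .al_pa = 0x03 };
            le_id l = be_to_le(b);

            printf("%02x%02x%02x\n", l.domain, l.area, l.al_pa); /* 010203 */
            return 0;
    }
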
@@ -395,7 +455,7 @@ struct srb_iocb {
struct els_logo_payload *els_logo_pyld;
dma_addr_t els_logo_pyld_dma;
} els_logo;
- struct {
+ struct els_plogi {
#define ELS_DCMD_PLOGI 0x3
uint32_t flags;
uint32_t els_cmd;
@@ -537,6 +597,7 @@ typedef struct srb {
wait_queue_head_t nvme_ls_waitq;
struct fc_port *fcport;
struct scsi_qla_host *vha;
+ unsigned int start_timer:1;
uint32_t handle;
uint16_t flags;
uint16_t type;
@@ -554,14 +615,22 @@ typedef struct srb {
struct bsg_job *bsg_job;
struct srb_cmd scmd;
} u;
- void (*done)(void *, int);
- void (*free)(void *);
+ /*
+ * Report completion status @res and call sp_put(@sp). @res is
+ * an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a
+ * QLA_* status value.
+ */
+ void (*done)(struct srb *sp, int res);
+ /* Stop the timer and free @sp. Only used by the FCP code. */
+ void (*free)(struct srb *sp);
+ /*
+ * Call nvme_private->fd->done() and free @sp. Only used by the NVMe
+ * code.
+ */
void (*put_fn)(struct kref *kref);
} srb_t;
#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
-#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
-#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
#define GET_CMD_SENSE_LEN(sp) \
(sp->u.scmd.request_sense_length)
@@ -921,6 +990,11 @@ struct mbx_cmd_32 {
#define MBS_LINK_DOWN_ERROR 0x400B
#define MBS_DIAG_ECHO_TEST_ERROR 0x400C
+static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
+{
+ return MBS_COMMAND_COMPLETE <= mbs && mbs <= MBS_DIAG_ECHO_TEST_ERROR;
+}
+
/*
* ISP mailbox asynchronous event status codes
*/
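
qla2xxx_is_valid_mbs() is paired later in the series with WARN_ONCE() so that a firmware completion status outside the documented window is reported once instead of being silently switch()ed on. A sketch of that usage; the 0x4000 lower bound is an assumption taken from MBS_COMMAND_COMPLETE's definition elsewhere in qla_def.h:

    #include <linux/bug.h>
    #include <linux/types.h>

    #define MBS_COMMAND_COMPLETE       0x4000
    #define MBS_DIAG_ECHO_TEST_ERROR   0x400C

    static inline bool mbs_in_range(unsigned int mbs)
    {
            return MBS_COMMAND_COMPLETE <= mbs &&
                   mbs <= MBS_DIAG_ECHO_TEST_ERROR;
    }

    static void check_fw_status(unsigned int mbs)
    {
            /* warn once per boot on an out-of-range status code */
            WARN_ONCE(!mbs_in_range(mbs), "mbs: %#x\n", mbs);
    }
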
@@ -1851,7 +1925,7 @@ struct crc_context {
uint16_t reserved_2;
uint16_t reserved_3;
uint32_t reserved_4;
- struct dsd64 data_dsd;
+ struct dsd64 data_dsd[1];
uint32_t reserved_5[2];
uint32_t reserved_6;
} nobundling;
@@ -1861,7 +1935,7 @@ struct crc_context {
uint16_t reserved_1;
__le16 dseg_count; /* Data segment count */
uint32_t reserved_2;
- struct dsd64 data_dsd;
+ struct dsd64 data_dsd[1];
struct dsd64 dif_dsd;
} bundling;
} u;
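
Declaring data_dsd as a one-element array lets the IOCB builders index additional descriptors past the first without pointer casts; it is the pre-flexible-array idiom for a variable-length tail. A userspace sketch, with the caveat that the enclosing buffer must be over-allocated:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct dsd {
            uint64_t address;
            uint32_t length;
    };

    struct ctx {
            uint32_t header;
            struct dsd data_dsd[1];   /* really 'one or more' */
    };

    /* Allocate room for n descriptors beyond the declared one. */
    static struct ctx *ctx_alloc(int n)
    {
            return calloc(1, sizeof(struct ctx) +
                             (n - 1) * sizeof(struct dsd));
    }

    static void ctx_fill(struct ctx *c, const struct dsd *src, int n)
    {
            memcpy(c->data_dsd, src, n * sizeof(*src));
    }
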
@@ -2289,22 +2363,6 @@ enum login_state { /* FW control Target side */
DSC_LS_LOGO_PEND,
};
-enum fcport_mgt_event {
- FCME_RELOGIN = 1,
- FCME_RSCN,
- FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
- FCME_PRLI_DONE,
- FCME_GNL_DONE,
- FCME_GPSC_DONE,
- FCME_GPDB_DONE,
- FCME_GPNID_DONE,
- FCME_GFFID_DONE,
- FCME_ADISC_DONE,
- FCME_GNNID_DONE,
- FCME_GFPNID_DONE,
- FCME_ELS_PLOGI_DONE,
-};
-
enum rscn_addr_format {
RSCN_PORT_ADDR,
RSCN_AREA_ADDR,
@@ -2422,7 +2480,6 @@ typedef struct fc_port {
#define QLA_FCPORT_FOUND 2
struct event_arg {
- enum fcport_mgt_event event;
fc_port_t *fcport;
srb_t *sp;
port_id_t id;
@@ -2745,7 +2802,7 @@ struct ct_sns_req {
/* GA_NXT, GPN_ID, GNN_ID, GFT_ID, GFPN_ID */
struct {
uint8_t reserved;
- uint8_t port_id[3];
+ be_id_t port_id;
} port_id;
struct {
@@ -2764,13 +2821,13 @@ struct ct_sns_req {
struct {
uint8_t reserved;
- uint8_t port_id[3];
+ be_id_t port_id;
uint8_t fc4_types[32];
} rft_id;
struct {
uint8_t reserved;
- uint8_t port_id[3];
+ be_id_t port_id;
uint16_t reserved2;
uint8_t fc4_feature;
uint8_t fc4_type;
@@ -2778,7 +2835,7 @@ struct ct_sns_req {
struct {
uint8_t reserved;
- uint8_t port_id[3];
+ be_id_t port_id;
uint8_t node_name[8];
} rnn_id;
@@ -2865,7 +2922,7 @@ struct ct_rsp_hdr {
struct ct_sns_gid_pt_data {
uint8_t control_byte;
- uint8_t port_id[3];
+ be_id_t port_id;
};
/* It's the same for both GPN_FT and GNN_FT */
@@ -2895,7 +2952,7 @@ struct ct_sns_rsp {
union {
struct {
uint8_t port_type;
- uint8_t port_id[3];
+ be_id_t port_id;
uint8_t port_name[8];
uint8_t sym_port_name_len;
uint8_t sym_port_name[255];
@@ -3111,7 +3168,7 @@ struct isp_operations {
void (*update_fw_options) (struct scsi_qla_host *);
int (*load_risc) (struct scsi_qla_host *, uint32_t *);
- char * (*pci_info_str) (struct scsi_qla_host *, char *);
+ char * (*pci_info_str)(struct scsi_qla_host *, char *, size_t);
char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
irq_handler_t intr_handler;
@@ -3850,7 +3907,7 @@ struct qla_hw_data {
/* NVRAM configuration data */
#define MAX_NVRAM_SIZE 4096
-#define VPD_OFFSET MAX_NVRAM_SIZE / 2
+#define VPD_OFFSET (MAX_NVRAM_SIZE / 2)
uint16_t nvram_size;
uint16_t nvram_base;
void *nvram;
@@ -4628,6 +4685,7 @@ struct secure_flash_update_block_pk {
#define QLA_SUSPENDED 0x106
#define QLA_BUSY 0x107
#define QLA_ALREADY_REGISTERED 0x109
+#define QLA_OS_TIMER_EXPIRED 0x10a
#define NVRAM_DELAY() udelay(10)
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a432caebefec..0a6fb359f4d5 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -57,10 +57,9 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
scsi_qla_host_t *vha = s->private;
struct qla_hw_data *ha = vha->hw;
- struct gid_list_info *gid_list;
+ struct gid_list_info *gid_list, *gid;
dma_addr_t gid_list_dma;
fc_port_t fc_port;
- char *id_iter;
int rc, i;
uint16_t entries, loop_id;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
@@ -82,13 +81,11 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
if (rc != QLA_SUCCESS)
goto out_free_id_list;
- id_iter = (char *)gid_list;
+ gid = gid_list;
seq_puts(s, "Port Name Port ID Loop ID\n");
for (i = 0; i < entries; i++) {
- struct gid_list_info *gid =
- (struct gid_list_info *)id_iter;
loop_id = le16_to_cpu(gid->loop_id);
memset(&fc_port, 0, sizeof(fc_port_t));
@@ -99,7 +96,7 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
fc_port.port_name, fc_port.d_id.b.domain,
fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
fc_port.loop_id);
- id_iter += ha->gid_list_info_size;
+ gid = (void *)gid + ha->gid_list_info_size;
}
out_free_id_list:
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
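
The debugfs walk above replaces the char * cursor with pointer arithmetic on the entry pointer itself; the stride still comes from the firmware (ha->gid_list_info_size), not from sizeof(). A sketch of the pattern:

    #include <stddef.h>

    struct gid_info {
            unsigned short loop_id;
            /* trailing, firmware-version-dependent bytes follow */
    };

    static void walk_gid_list(struct gid_info *gid, size_t stride,
                              int entries)
    {
            int i;

            for (i = 0; i < entries; i++) {
                    /* ... consume gid->loop_id ... */
                    gid = (void *)gid + stride;   /* GNU void * math */
            }
    }
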
diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h
index 7479924ba422..20788054b91b 100644
--- a/drivers/scsi/qla2xxx/qla_dsd.h
+++ b/drivers/scsi/qla2xxx/qla_dsd.h
@@ -1,6 +1,8 @@
#ifndef _QLA_DSD_H_
#define _QLA_DSD_H_
+#include <asm/unaligned.h>
+
/* 32-bit data segment descriptor (8 bytes) */
struct dsd32 {
__le32 address;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index df079a8c2b33..732bb871c433 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -761,13 +761,13 @@ struct els_entry_24xx {
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
- uint32_t rx_byte_count;
- uint32_t tx_byte_count;
+ __le32 rx_byte_count;
+ __le32 tx_byte_count;
__le64 tx_address __packed; /* Data segment 0 address. */
- uint32_t tx_len; /* Data segment 0 length. */
+ __le32 tx_len; /* Data segment 0 length. */
__le64 rx_address __packed; /* Data segment 1 address. */
- uint32_t rx_len; /* Data segment 1 length. */
+ __le32 rx_len; /* Data segment 1 length. */
};
struct els_sts_entry_24xx {
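
Retyping the byte counts and segment lengths as __le32 documents that these IOCB fields are little-endian on the wire and lets sparse (make C=1) flag any access that bypasses the conversion helpers. A sketch of the accessor discipline the annotation enforces:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_iocb {
            __le32 rx_byte_count;   /* little-endian in host memory */
    };

    static u32 demo_get_rx_bytes(const struct demo_iocb *iocb)
    {
            return le32_to_cpu(iocb->rx_byte_count);
    }

    static void demo_set_rx_bytes(struct demo_iocb *iocb, u32 val)
    {
            iocb->rx_byte_count = cpu_to_le32(val);
    }
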
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f9669fdf7798..d11416dcee4e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -45,6 +45,8 @@ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
+extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
+ struct els_plogi *els_plogi);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
@@ -96,7 +98,11 @@ extern int qla2x00_init_rings(scsi_qla_host_t *);
extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
int, int, bool);
extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
-void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *);
+void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea);
+void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha,
+ struct event_arg *ea);
+void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
+ struct event_arg *ea);
int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
@@ -213,9 +219,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
-extern void qla2x00_sp_compl(void *, int);
-extern void qla2xxx_qpair_sp_free_dma(void *);
-extern void qla2xxx_qpair_sp_compl(void *, int);
+extern void qla2x00_sp_compl(srb_t *sp, int);
+extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
+extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
extern void qla24xx_sched_upd_fcport(fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
@@ -244,7 +250,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
-extern void qla2x00_sp_free_dma(void *);
+extern void qla2x00_sp_free_dma(srb_t *sp);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -272,6 +278,7 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *,
uint16_t, struct req_que *);
+extern uint32_t qla2xxx_get_next_handle(struct req_que *req);
extern int qla2x00_start_scsi(srb_t *sp);
extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct qla_qpair *,
@@ -554,7 +561,7 @@ fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
* Global Function Prototypes in qla_sup.c source file.
*/
extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
-extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
+extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
uint32_t, uint32_t);
extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
uint32_t);
@@ -630,7 +637,7 @@ extern ulong qla27xx_fwdt_template_size(void *);
extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
extern void ql_dump_regs(uint, scsi_qla_host_t *, uint);
-extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, void *, uint);
+extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, const void *, uint);
/*
* Global Function Prototypes in qla_gs.c source file.
*/
@@ -732,7 +739,7 @@ extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
extern int qlafx00_soft_reset(scsi_qla_host_t *);
extern int qlafx00_chip_diag(scsi_qla_host_t *);
extern void qlafx00_config_rings(struct scsi_qla_host *);
-extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *, size_t);
extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
extern irqreturn_t qlafx00_intr_handler(int, void *);
extern void qlafx00_enable_intrs(struct qla_hw_data *);
@@ -790,10 +797,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
/* IOCB related functions */
extern int qla82xx_start_scsi(srb_t *);
-extern void qla2x00_sp_free(void *);
+extern void qla2x00_sp_free(srb_t *sp);
extern void qla2x00_sp_timeout(struct timer_list *);
-extern void qla2x00_bsg_job_done(void *, int);
-extern void qla2x00_bsg_sp_free(void *);
+extern void qla2x00_bsg_job_done(srb_t *sp, int);
+extern void qla2x00_bsg_sp_free(srb_t *sp);
extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
@@ -822,8 +829,8 @@ extern int qla82xx_device_state_handler(scsi_qla_host_t *);
extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
-extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
- size_t, char *);
+extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, size_t,
+ const char *);
extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
extern void qla82xx_start_iocbs(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9f58e591666d..dc0e36676313 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -226,9 +226,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_id */
- ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
- ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
- ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+ ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -242,9 +240,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
rval = QLA_FUNCTION_FAILED;
} else {
/* Populate fc_port_t entry. */
- fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
- fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
- fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
+ fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
WWN_SIZE);
@@ -337,9 +333,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
/* Set port IDs in switch info list. */
for (i = 0; i < ha->max_fibre_devices; i++) {
gid_data = &ct_rsp->rsp.gid_pt.entries[i];
- list[i].d_id.b.domain = gid_data->port_id[0];
- list[i].d_id.b.area = gid_data->port_id[1];
- list[i].d_id.b.al_pa = gid_data->port_id[2];
+ list[i].d_id = be_to_port_id(gid_data->port_id);
memset(list[i].fabric_port_name, 0, WWN_SIZE);
list[i].fp_speed = PORT_SPEED_UNKNOWN;
@@ -403,9 +397,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_id */
- ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
- ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
- ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+ ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -472,9 +464,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_id */
- ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
- ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
- ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+ ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -509,9 +499,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
return (rval);
}
-static void qla2x00_async_sns_sp_done(void *s, int rc)
+static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
{
- struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct ct_sns_pkt *ct_sns;
struct qla_work_evt *e;
@@ -639,9 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, FC-4 types */
- ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
- ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
- ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
+ ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
if (vha->flags.nvme_enabled)
@@ -737,9 +724,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
- ct_req->req.rff_id.port_id[0] = d_id->b.domain;
- ct_req->req.rff_id.port_id[1] = d_id->b.area;
- ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
+ ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
ct_req->req.rff_id.fc4_feature = fc4feature;
ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
@@ -830,9 +815,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, node_name */
- ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
- ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
- ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
+ ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
@@ -1479,7 +1462,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
return ct_pkt;
}
-static inline ms_iocb_entry_t *
+static void
qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
{
struct qla_hw_data *ha = vha->hw;
@@ -1493,8 +1476,6 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
ms_pkt->req_bytecount = cpu_to_le32(req_size);
ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
}
-
- return ms_pkt;
}
/**
@@ -1557,7 +1538,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Attributes */
ct_req->req.rhba.attrs.count =
cpu_to_be32(FDMI_HBA_ATTR_COUNT);
- entries = ct_req->req.rhba.hba_identifier;
+ entries = &ct_req->req;
/* Nodename. */
eiter = entries + size;
@@ -1766,7 +1747,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
/* Attributes */
ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
- entries = ct_req->req.rpa.port_name;
+ entries = &ct_req->req;
/* FC4 types. */
eiter = entries + size;
@@ -1979,7 +1960,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
/* Attributes */
ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
- entries = ct_req->req.rhba2.hba_identifier;
+ entries = &ct_req->req;
/* Nodename. */
eiter = entries + size;
@@ -2338,7 +2319,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
/* Attributes */
ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
- entries = ct_req->req.rpa2.port_name;
+ entries = &ct_req->req;
/* FC4 types. */
eiter = entries + size;
@@ -2730,9 +2711,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_id */
- ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
- ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
- ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+ ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -2936,9 +2915,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare CT arguments -- port_id */
- ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
- ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
- ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
+ ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -3011,9 +2988,8 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
qla_post_iidma_work(vha, fcport);
}
-static void qla24xx_async_gpsc_sp_done(void *s, int res)
+static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport = sp->fcport;
@@ -3055,11 +3031,10 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
memset(&ea, 0, sizeof(ea));
- ea.event = FCME_GPSC_DONE;
ea.rc = res;
ea.fcport = fcport;
ea.sp = sp;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_gpsc_event(vha, &ea);
done:
sp->free(sp);
@@ -3144,17 +3119,7 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
switch (sp->type) {
case SRB_ELS_DCMD:
- if (c->u.els_plogi.els_plogi_pyld)
- dma_free_coherent(&vha->hw->pdev->dev,
- c->u.els_plogi.tx_size,
- c->u.els_plogi.els_plogi_pyld,
- c->u.els_plogi.els_plogi_pyld_dma);
-
- if (c->u.els_plogi.els_resp_pyld)
- dma_free_coherent(&vha->hw->pdev->dev,
- c->u.els_plogi.rx_size,
- c->u.els_plogi.els_resp_pyld,
- c->u.els_plogi.els_resp_pyld_dma);
+ qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
break;
case SRB_CT_PTHRU_CMD:
default:
@@ -3280,9 +3245,8 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
}
}
-static void qla2x00_async_gpnid_sp_done(void *s, int res)
+static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct ct_sns_req *ct_req =
(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
@@ -3295,22 +3259,19 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
if (res)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
- sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
+ sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
ct_rsp->rsp.gpn_id.port_name);
else
ql_dbg(ql_dbg_disc, vha, 0x2066,
"Async done-%s good rscn gen %d ID %3phC. %8phC\n",
- sp->name, sp->gen1, ct_req->req.port_id.port_id,
+ sp->name, sp->gen1, &ct_req->req.port_id.port_id,
ct_rsp->rsp.gpn_id.port_name);
memset(&ea, 0, sizeof(ea));
memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
ea.sp = sp;
- ea.id.b.domain = ct_req->req.port_id.port_id[0];
- ea.id.b.area = ct_req->req.port_id.port_id[1];
- ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
+ ea.id = be_to_port_id(ct_req->req.port_id.port_id);
ea.rc = res;
- ea.event = FCME_GPNID_DONE;
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
list_del(&sp->elem);
@@ -3329,25 +3290,22 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
return;
}
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_gpnid_event(vha, &ea);
e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
if (!e) {
/* please ignore kernel warning. otherwise, we have mem leak. */
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
sp->free(sp);
return;
@@ -3419,9 +3377,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
/* GPN_ID req */
- ct_req->req.port_id.port_id[0] = id->b.domain;
- ct_req->req.port_id.port_id[1] = id->b.area;
- ct_req->req.port_id.port_id[2] = id->b.al_pa;
+ ct_req->req.port_id.port_id = port_id_to_be_id(*id);
sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
@@ -3432,7 +3388,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
ql_dbg(ql_dbg_disc, vha, 0x2067,
"Async-%s hdl=%x ID %3phC.\n", sp->name,
- sp->handle, ct_req->req.port_id.port_id);
+ sp->handle, &ct_req->req.port_id.port_id);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -3467,54 +3423,52 @@ done:
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport = ea->fcport;
+ fc_port_t *fcport = ea->fcport;
- qla24xx_post_gnl_work(vha, fcport);
+ qla24xx_post_gnl_work(vha, fcport);
}
-void qla24xx_async_gffid_sp_done(void *s, int res)
+void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
- struct scsi_qla_host *vha = sp->vha;
- fc_port_t *fcport = sp->fcport;
- struct ct_sns_rsp *ct_rsp;
- struct event_arg ea;
-
- ql_dbg(ql_dbg_disc, vha, 0x2133,
- "Async done-%s res %x ID %x. %8phC\n",
- sp->name, res, fcport->d_id.b24, fcport->port_name);
-
- fcport->flags &= ~FCF_ASYNC_SENT;
- ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
- /*
- * FC-GS-7, 5.2.3.12 FC-4 Features - format
- * The format of the FC-4 Features object, as defined by the FC-4,
- * Shall be an array of 4-bit values, one for each type code value
- */
- if (!res) {
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
- /* w1 b00:03 */
- fcport->fc4_type =
- ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
- fcport->fc4_type &= 0xf;
- }
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ struct ct_sns_rsp *ct_rsp;
+ struct event_arg ea;
- if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
- /* w5 [00:03]/28h */
- fcport->fc4f_nvme =
- ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
- fcport->fc4f_nvme &= 0xf;
+ ql_dbg(ql_dbg_disc, vha, 0x2133,
+ "Async done-%s res %x ID %x. %8phC\n",
+ sp->name, res, fcport->d_id.b24, fcport->port_name);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ /*
+ * FC-GS-7, 5.2.3.12 FC-4 Features - format
+ * The format of the FC-4 Features object, as defined by the FC-4,
+ * Shall be an array of 4-bit values, one for each type code value
+ */
+ if (!res) {
+ if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+ /* w1 b00:03 */
+ fcport->fc4_type =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+ fcport->fc4_type &= 0xf;
}
- }
- memset(&ea, 0, sizeof(ea));
- ea.sp = sp;
- ea.fcport = sp->fcport;
- ea.rc = res;
- ea.event = FCME_GFFID_DONE;
+ if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+ /* w5 [00:03]/28h */
+ fcport->fc4f_nvme =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ fcport->fc4f_nvme &= 0xf;
+ }
+ }
+
+ memset(&ea, 0, sizeof(ea));
+ ea.sp = sp;
+ ea.fcport = sp->fcport;
+ ea.rc = res;
- qla2x00_fcport_event_handler(vha, &ea);
- sp->free(sp);
+ qla24xx_handle_gffid_event(vha, &ea);
+ sp->free(sp);
}
/* Get FC4 Feature with Nport ID. */
@@ -3674,7 +3628,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
- fcport->scan_needed = 0;
fcport->scan_state = QLA_FCPORT_FOUND;
found = true;
/*
@@ -3683,10 +3636,12 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
qla2x00_clear_loop_id(fcport);
fcport->flags |= FCF_FABRIC_DEVICE;
- } else if (fcport->d_id.b24 != rp->id.b24) {
+ } else if (fcport->d_id.b24 != rp->id.b24 ||
+ fcport->scan_needed) {
qlt_schedule_sess_for_deletion(fcport);
}
fcport->d_id.b24 = rp->id.b24;
+ fcport->scan_needed = 0;
break;
}
@@ -3898,9 +3853,8 @@ static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
}
}
-static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct ct_sns_req *ct_req =
(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
@@ -4053,9 +4007,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_flags &= ~SF_SCANNING;
- spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
}
@@ -4079,6 +4030,17 @@ done_free_sp:
sp->free(sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
+ }
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+
return rval;
} /* GNNFT */
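
The error path now clears SF_SCANNING only after the srb is freed and, when nothing else is pending, queues a delayed retry of the fabric scan instead of dropping it. A sketch of the locking pattern, assuming (as the driver does) that scan_flags is only touched under work_lock:

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    #define SF_SCANNING 0x01
    #define SF_QUEUED   0x02

    static void scan_failed_requeue(spinlock_t *lock,
                                    unsigned int *scan_flags,
                                    struct delayed_work *scan_work)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            *scan_flags &= ~SF_SCANNING;
            if (*scan_flags == 0) {
                    *scan_flags |= SF_QUEUED;
                    schedule_delayed_work(scan_work, 5); /* jiffies */
            }
            spin_unlock_irqrestore(lock, flags);
    }
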
@@ -4152,7 +4114,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
rspsz,
&sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xffff,
"Failed to allocate ct_sns request.\n");
@@ -4208,9 +4170,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_flags &= ~SF_SCANNING;
- spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
}
@@ -4234,6 +4193,17 @@ done_free_sp:
sp->free(sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
+ }
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+
return rval;
}
@@ -4261,9 +4231,8 @@ void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
qla24xx_post_gnl_work(vha, ea->fcport);
}
-static void qla2x00_async_gnnid_sp_done(void *s, int res)
+static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
fc_port_t *fcport = sp->fcport;
u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
@@ -4279,13 +4248,12 @@ static void qla2x00_async_gnnid_sp_done(void *s, int res)
ea.fcport = fcport;
ea.sp = sp;
ea.rc = res;
- ea.event = FCME_GNNID_DONE;
ql_dbg(ql_dbg_disc, vha, 0x204f,
"Async done-%s res %x, WWPN %8phC %8phC\n",
sp->name, res, fcport->port_name, fcport->node_name);
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_gnnid_event(vha, &ea);
sp->free(sp);
}
@@ -4318,9 +4286,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
GNN_ID_RSP_SIZE);
/* GNN_ID req */
- ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
- ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
- ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+ ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
/* req & rsp use the same buffer */
@@ -4396,9 +4362,8 @@ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
qla24xx_post_gpsc_work(vha, fcport);
}
-static void qla2x00_async_gfpnid_sp_done(void *s, int res)
+static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
fc_port_t *fcport = sp->fcport;
u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
@@ -4413,13 +4378,12 @@ static void qla2x00_async_gfpnid_sp_done(void *s, int res)
ea.fcport = fcport;
ea.sp = sp;
ea.rc = res;
- ea.event = FCME_GFPNID_DONE;
ql_dbg(ql_dbg_disc, vha, 0x204f,
"Async done-%s res %x, WWPN %8phC %8phC\n",
sp->name, res, fcport->port_name, fcport->fabric_port_name);
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_gfpnid_event(vha, &ea);
sp->free(sp);
}
@@ -4450,9 +4414,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
GFPN_ID_RSP_SIZE);
/* GFPN_ID req */
- ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
- ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
- ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+ ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
/* req & rsp use the same buffer */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d4c3baec9172..643d2324082e 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -37,8 +37,8 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
-static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
- struct event_arg *);
+static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
+ struct event_arg *ea);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
struct event_arg *);
static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
@@ -63,10 +63,8 @@ qla2x00_sp_timeout(struct timer_list *t)
iocb->timeout(sp);
}
-void
-qla2x00_sp_free(void *ptr)
+void qla2x00_sp_free(srb_t *sp)
{
- srb_t *sp = ptr;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
del_timer(&iocb->timer);
@@ -99,22 +97,33 @@ static void qla24xx_abort_iocb_timeout(void *data)
{
srb_t *sp = data;
struct srb_iocb *abt = &sp->u.iocb_cmd;
+ struct qla_qpair *qpair = sp->qpair;
+ u32 handle;
+ unsigned long flags;
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
+ /* removing the abort */
+ if (qpair->req->outstanding_cmds[handle] == sp) {
+ qpair->req->outstanding_cmds[handle] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
abt->u.abt.comp_status = CS_TIMEOUT;
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ sp->done(sp, QLA_OS_TIMER_EXPIRED);
}
-static void qla24xx_abort_sp_done(void *ptr, int res)
+static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
- if (del_timer(&sp->u.iocb_cmd.timer)) {
- if (sp->flags & SRB_WAKEUP_ON_COMP)
- complete(&abt->u.abt.comp);
- else
- sp->free(sp);
- }
+ del_timer(&sp->u.iocb_cmd.timer);
+ if (sp->flags & SRB_WAKEUP_ON_COMP)
+ complete(&abt->u.abt.comp);
+ else
+ sp->free(sp);
}
static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
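
On abort-IOCB timeout the srb is now pulled out of the request queue's outstanding_cmds[] table under the qpair lock before being completed with QLA_OS_TIMER_EXPIRED, so a late firmware response cannot find the same handle and complete it a second time. A generic sketch of that detach step:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static void timeout_detach(void **outstanding, u32 num_handles,
                               void *sp, spinlock_t *lock)
    {
            unsigned long flags;
            u32 handle;

            spin_lock_irqsave(lock, flags);
            for (handle = 1; handle < num_handles; handle++) {
                    if (outstanding[handle] == sp) {
                            outstanding[handle] = NULL;  /* detach */
                            break;
                    }
            }
            spin_unlock_irqrestore(lock, flags);
    }
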
@@ -127,7 +136,7 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
GFP_ATOMIC);
if (!sp)
- goto done;
+ return rval;
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
@@ -151,20 +160,18 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
cmd_sp->type);
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
+ if (rval != QLA_SUCCESS) {
+ sp->free(sp);
+ return rval;
+ }
if (wait) {
wait_for_completion(&abt_iocb->u.abt.comp);
rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
QLA_SUCCESS : QLA_FUNCTION_FAILED;
- } else {
- goto done;
+ sp->free(sp);
}
-done_free_sp:
- sp->free(sp);
-done:
return rval;
}
@@ -234,13 +241,15 @@ qla2x00_async_iocb_timeout(void *data)
sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
break;
+ default:
+ WARN_ON_ONCE(true);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ break;
}
}
-static void
-qla2x00_async_login_sp_done(void *ptr, int res)
+static void qla2x00_async_login_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
@@ -252,14 +261,13 @@ qla2x00_async_login_sp_done(void *ptr, int res)
if (!test_bit(UNLOADING, &vha->dpc_flags)) {
memset(&ea, 0, sizeof(ea));
- ea.event = FCME_PLOGI_DONE;
ea.fcport = sp->fcport;
ea.data[0] = lio->u.logio.data[0];
ea.data[1] = lio->u.logio.data[1];
ea.iop[0] = lio->u.logio.iop[0];
ea.iop[1] = lio->u.logio.iop[1];
ea.sp = sp;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_plogi_done_event(vha, &ea);
}
sp->free(sp);
@@ -289,8 +297,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
+ fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC - not sending command.\n",
+ __func__, fcport->port_name);
+ return rval;
+ }
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -341,11 +354,8 @@ done:
return rval;
}
-static void
-qla2x00_async_logout_sp_done(void *ptr, int res)
+static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
-
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
sp->fcport->login_gen++;
qlt_logo_completion_handler(sp->fcport, res);
@@ -359,9 +369,6 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
-
fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -405,10 +412,8 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
qlt_logo_completion_handler(fcport, data[0]);
}
-static void
-qla2x00_async_prlo_sp_done(void *s, int res)
+static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
- srb_t *sp = (srb_t *)s;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
@@ -469,6 +474,9 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+ WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
+ ea->data[0]);
+
if (ea->data[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
@@ -511,10 +519,8 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return qla2x00_post_work(vha, e);
}
-static void
-qla2x00_async_adisc_sp_done(void *ptr, int res)
+static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct scsi_qla_host *vha = sp->vha;
struct event_arg ea;
struct srb_iocb *lio = &sp->u.iocb_cmd;
@@ -526,7 +532,6 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
- ea.event = FCME_ADISC_DONE;
ea.rc = res;
ea.data[0] = lio->u.logio.data[0];
ea.data[1] = lio->u.logio.data[1];
@@ -535,7 +540,7 @@ qla2x00_async_adisc_sp_done(void *ptr, int res)
ea.fcport = sp->fcport;
ea.sp = sp;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_adisc_event(vha, &ea);
sp->free(sp);
}
@@ -803,6 +808,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport->fw_login_state = current_login_state;
fcport->d_id = id;
switch (current_login_state) {
+ case DSC_LS_PRLI_PEND:
+ /*
+			 * In the middle of PRLI. Let it finish. Push
+			 * disc_state back to DELETED so the relogin
+			 * code can recheck the state with another GNL.
+ */
+ fcport->disc_state = DSC_DELETED;
+ break;
case DSC_LS_PRLI_COMP:
if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
fcport->port_type = FCT_INITIATOR;
@@ -917,10 +931,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
}
} /* gnl_event */
-static void
-qla24xx_async_gnl_sp_done(void *s, int res)
+static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
unsigned long flags;
struct fc_port *fcport = NULL, *tf;
@@ -943,7 +955,6 @@ qla24xx_async_gnl_sp_done(void *s, int res)
memset(&ea, 0, sizeof(ea));
ea.sp = sp;
ea.rc = res;
- ea.event = FCME_GNL_DONE;
if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
sizeof(struct get_name_list_extended)) {
@@ -982,7 +993,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ea.fcport = fcport;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_gnl_done_event(vha, &ea);
}
/* create new fcport if fw has knowledge of new sessions */
@@ -1107,10 +1118,8 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return qla2x00_post_work(vha, e);
}
-static
-void qla24xx_async_gpdb_sp_done(void *s, int res)
+static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport = sp->fcport;
@@ -1129,11 +1138,10 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
- ea.event = FCME_GPDB_DONE;
ea.fcport = fcport;
ea.sp = sp;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_gpdb_event(vha, &ea);
dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
sp->u.iocb_cmd.u.mbx.in_dma);
@@ -1154,10 +1162,8 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return qla2x00_post_work(vha, e);
}
-static void
-qla2x00_async_prli_sp_done(void *ptr, int res)
+static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
@@ -1170,7 +1176,6 @@ qla2x00_async_prli_sp_done(void *ptr, int res)
if (!test_bit(UNLOADING, &vha->dpc_flags)) {
memset(&ea, 0, sizeof(ea));
- ea.event = FCME_PRLI_DONE;
ea.fcport = sp->fcport;
ea.data[0] = lio->u.logio.data[0];
ea.data[1] = lio->u.logio.data[1];
@@ -1178,7 +1183,7 @@ qla2x00_async_prli_sp_done(void *ptr, int res)
ea.iop[1] = lio->u.logio.iop[1];
ea.sp = sp;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_prli_done_event(vha, &ea);
}
sp->free(sp);
@@ -1262,8 +1267,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
+ fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC - not sending command.\n",
+ __func__, fcport->port_name);
return rval;
+ }
fcport->disc_state = DSC_GPDB;
@@ -1473,7 +1483,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
u64 wwn;
u16 sec;
- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
"%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
@@ -1484,6 +1494,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
if ((fcport->loop_id != FC_NO_LOOP_ID) &&
+ qla_dual_mode_enabled(vha) &&
((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND)))
return 0;
@@ -1636,12 +1647,34 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
return qla2x00_post_work(vha, e);
}
-static
+void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport;
+ unsigned long flags;
+
+ fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ if (fcport) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
+ }
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
struct event_arg *ea)
{
fc_port_t *fcport = ea->fcport;
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return;
+
ql_dbg(ql_dbg_disc, vha, 0x2102,
"%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
__func__, fcport->port_name, fcport->disc_state,
@@ -1651,110 +1684,16 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
fcport->last_login_gen, fcport->login_gen,
fcport->flags);
- if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PRLI_PEND))
- return;
-
- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- return;
- }
- }
-
if (fcport->last_rscn_gen != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
-
+ qla24xx_post_gnl_work(vha, fcport);
return;
}
qla24xx_fcport_handle_login(vha, fcport);
}
-
-static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
- struct event_arg *ea)
-{
- ql_dbg(ql_dbg_disc, vha, 0x2118,
- "%s %d %8phC post PRLI\n",
- __func__, __LINE__, ea->fcport->port_name);
- qla24xx_post_prli_work(vha, ea->fcport);
-}
-
-void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport;
-
- switch (ea->event) {
- case FCME_RELOGIN:
- if (test_bit(UNLOADING, &vha->dpc_flags))
- return;
-
- qla24xx_handle_relogin_event(vha, ea);
- break;
- case FCME_RSCN:
- if (test_bit(UNLOADING, &vha->dpc_flags))
- return;
- {
- unsigned long flags;
-
- fcport = qla2x00_find_fcport_by_nportid
- (vha, &ea->id, 1);
- if (fcport) {
- fcport->scan_needed = 1;
- fcport->rscn_gen++;
- }
-
- spin_lock_irqsave(&vha->work_lock, flags);
- if (vha->scan.scan_flags == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s: schedule\n", __func__);
- vha->scan.scan_flags |= SF_QUEUED;
- schedule_delayed_work(&vha->scan.scan_work, 5);
- }
- spin_unlock_irqrestore(&vha->work_lock, flags);
- }
- break;
- case FCME_GNL_DONE:
- qla24xx_handle_gnl_done_event(vha, ea);
- break;
- case FCME_GPSC_DONE:
- qla24xx_handle_gpsc_event(vha, ea);
- break;
- case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
- qla24xx_handle_plogi_done_event(vha, ea);
- break;
- case FCME_PRLI_DONE:
- qla24xx_handle_prli_done_event(vha, ea);
- break;
- case FCME_GPDB_DONE:
- qla24xx_handle_gpdb_event(vha, ea);
- break;
- case FCME_GPNID_DONE:
- qla24xx_handle_gpnid_event(vha, ea);
- break;
- case FCME_GFFID_DONE:
- qla24xx_handle_gffid_event(vha, ea);
- break;
- case FCME_ADISC_DONE:
- qla24xx_handle_adisc_event(vha, ea);
- break;
- case FCME_GNNID_DONE:
- qla24xx_handle_gnnid_event(vha, ea);
- break;
- case FCME_GFPNID_DONE:
- qla24xx_handle_gfpnid_event(vha, ea);
- break;
- case FCME_ELS_PLOGI_DONE:
- qla_handle_els_plogi_done(vha, ea);
- break;
- default:
- BUG_ON(1);
- break;
- }
-}
-
/*
* RSCN(s) came in for this fcport, but the RSCN(s) was not able
* to be consumed by the fcport
@@ -1772,10 +1711,9 @@ void qla_rscn_replay(fc_port_t *fcport)
if (fcport->scan_needed) {
memset(&ea, 0, sizeof(ea));
- ea.event = FCME_RSCN;
ea.id = fcport->d_id;
ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
- qla2x00_fcport_event_handler(fcport->vha, &ea);
+ qla2x00_handle_rscn(fcport->vha, &ea);
}
}
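
With the FCME_* enum gone, each completion routine calls its handler directly rather than tagging an event_arg and funneling it through qla2x00_fcport_event_handler()'s switch. A schematic before/after of the dispatch change, with illustrative names:

    struct arg { int event; /* ... payload ... */ };

    static void handle_a(struct arg *ea) { /* ... */ }

    /* before: tag the event, then route through one big switch */
    static void a_done_old(struct arg *ea)
    {
            ea->event = 1; /* e.g. FCME_A_DONE */
            /* dispatcher(ea) -> switch (ea->event) -> handle_a(ea) */
    }

    /* after: one direct call, no enum and no dispatcher */
    static void a_done_new(struct arg *ea)
    {
            handle_a(ea);
    }
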
@@ -1789,10 +1727,8 @@ qla2x00_tmf_iocb_timeout(void *data)
complete(&tmf->u.tmf.comp);
}
-static void
-qla2x00_tmf_sp_done(void *ptr, int res)
+static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct srb_iocb *tmf = &sp->u.iocb_cmd;
complete(&tmf->u.tmf.comp);
@@ -1890,6 +1826,9 @@ qla24xx_async_abort_command(srb_t *sp)
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
+ WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
+ ea->data[0]);
+
switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
ql_dbg(ql_dbg_disc, vha, 0x2118,
@@ -1929,7 +1868,7 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
}
}
-static void
+void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
port_id_t cid; /* conflict Nport id */
@@ -1953,8 +1892,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
return;
}
- if (fcport->disc_state == DSC_DELETE_PEND)
+ if ((fcport->disc_state == DSC_DELETE_PEND) ||
+ (fcport->disc_state == DSC_DELETED)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
+ }
if (ea->sp->gen2 != fcport->login_gen) {
/* target side must have changed it. */
@@ -1972,6 +1914,9 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
return;
}
+ WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
+ ea->data[0]);
+
switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
/*
@@ -2266,6 +2211,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
rval = qla2x00_init_rings(vha);
+ /* No point in continuing if firmware initialization failed. */
+ if (rval != QLA_SUCCESS)
+ return rval;
+
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -3082,103 +3031,113 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
}
static void
-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+qla2x00_init_fce_trace(scsi_qla_host_t *vha)
{
int rval;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
- if (ha->eft) {
+ if (!IS_FWI2_CAPABLE(ha))
+ return;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return;
+
+ if (ha->fce) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
- "%s: Offload Mem is already allocated.\n",
- __func__);
+ "%s: FCE Mem is already allocated.\n",
+ __func__);
return;
}
- if (IS_FWI2_CAPABLE(ha)) {
- /* Allocate memory for Fibre Channel Event Buffer. */
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
- !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- goto try_eft;
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ ql_log(ql_log_warn, vha, 0x00be,
+ "Unable to allocate (%d KB) for FCE.\n",
+ FCE_SIZE / 1024);
+ return;
+ }
- if (ha->fce)
- dma_free_coherent(&ha->pdev->dev,
- FCE_SIZE, ha->fce, ha->fce_dma);
+ rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
+ ha->fce_mb, &ha->fce_bufs);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x00bf,
+ "Unable to initialize FCE (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
+ return;
+ }
- /* Allocate memory for Fibre Channel Event Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
- GFP_KERNEL);
- if (!tc) {
- ql_log(ql_log_warn, vha, 0x00be,
- "Unable to allocate (%d KB) for FCE.\n",
- FCE_SIZE / 1024);
- goto try_eft;
- }
-
- rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
- ha->fce_mb, &ha->fce_bufs);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00bf,
- "Unable to initialize FCE (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
- tc_dma);
- ha->flags.fce_enabled = 0;
- goto try_eft;
- }
- ql_dbg(ql_dbg_init, vha, 0x00c0,
- "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
-
- ha->flags.fce_enabled = 1;
- ha->fce_dma = tc_dma;
- ha->fce = tc;
-
-try_eft:
- if (ha->eft)
- dma_free_coherent(&ha->pdev->dev,
- EFT_SIZE, ha->eft, ha->eft_dma);
+ ql_dbg(ql_dbg_init, vha, 0x00c0,
+ "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
- /* Allocate memory for Extended Trace Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
- if (!tc) {
- ql_log(ql_log_warn, vha, 0x00c1,
- "Unable to allocate (%d KB) for EFT.\n",
- EFT_SIZE / 1024);
- goto eft_err;
- }
+ ha->flags.fce_enabled = 1;
+ ha->fce_dma = tc_dma;
+ ha->fce = tc;
+}
- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00c2,
- "Unable to initialize EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
- tc_dma);
- goto eft_err;
- }
- ql_dbg(ql_dbg_init, vha, 0x00c3,
- "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+static void
+qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ dma_addr_t tc_dma;
+ void *tc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return;
- ha->eft_dma = tc_dma;
- ha->eft = tc;
+ if (ha->eft) {
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "%s: EFT Mem is already allocated.\n",
+ __func__);
+ return;
}
-eft_err:
- return;
+ /* Allocate memory for Extended Trace Buffer. */
+ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
+ if (!tc) {
+ ql_log(ql_log_warn, vha, 0x00c1,
+ "Unable to allocate (%d KB) for EFT.\n",
+ EFT_SIZE / 1024);
+ return;
+ }
+
+ rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x00c2,
+ "Unable to initialize EFT (%d).\n", rval);
+ dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
+ return;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+
+ ha->eft_dma = tc_dma;
+ ha->eft = tc;
+}
+
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+{
+ qla2x00_init_fce_trace(vha);
+ qla2x00_init_eft_trace(vha);
}
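
The goto ladder in the old qla2x00_alloc_offload_mem() is now two helpers that share one shape: return early if the buffer already exists, allocate, try to enable tracing in firmware, and free the buffer again if enabling fails. A minimal sketch of that shared shape, with hypothetical names (init_trace_buf, enable_trace) standing in for the FCE/EFT specifics:

/* Sketch only: init_trace_buf() and enable_trace() are hypothetical
 * stand-ins for the FCE/EFT pairs above, not driver symbols. */
static void init_trace_buf(scsi_qla_host_t *vha, size_t size,
			   void **buf, dma_addr_t *buf_dma)
{
	dma_addr_t tc_dma;
	void *tc;

	if (*buf)
		return;			/* idempotent: allocate only once */

	tc = dma_alloc_coherent(&vha->hw->pdev->dev, size, &tc_dma,
				GFP_KERNEL);
	if (!tc)
		return;			/* tracing is optional; carry on */

	if (enable_trace(vha, tc_dma) != QLA_SUCCESS) {
		dma_free_coherent(&vha->hw->pdev->dev, size, tc, tc_dma);
		return;			/* leave *buf NULL */
	}

	*buf = tc;
	*buf_dma = tc_dma;
}

Because both helpers bail out when their buffer is already set, qla2x00_alloc_fw_dump() below can call them directly instead of re-implementing the allocation.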
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
- int rval;
uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
eft_size, fce_size, mq_size;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
struct qla2xxx_fw_dump *fw_dump;
- dma_addr_t tc_dma;
- void *tc;
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
req_q_size = rsp_q_size = 0;
@@ -3216,37 +3175,13 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
}
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
- /* Allocate memory for Fibre Channel Event Buffer. */
- if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
- !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
- goto try_eft;
- fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
-try_eft:
+ qla2x00_init_fce_trace(vha);
+ if (ha->fce)
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+ qla2x00_init_eft_trace(vha);
if (ha->eft)
- dma_free_coherent(&ha->pdev->dev,
- EFT_SIZE, ha->eft, ha->eft_dma);
-
- /* Allocate memory for Extended Trace Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
- if (!tc) {
- ql_log(ql_log_warn, vha, 0x00c1,
- "Unable to allocate (%d KB) for EFT.\n",
- EFT_SIZE / 1024);
- goto allocate;
- }
-
- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00c2,
- "Unable to initialize EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
- tc_dma);
- }
- ql_dbg(ql_dbg_init, vha, 0x00c3,
- "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
- eft_size = EFT_SIZE;
+ eft_size = EFT_SIZE;
}
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -3268,24 +3203,22 @@ try_eft:
j, fwdt->dump_size);
dump_size += fwdt->dump_size;
}
- goto allocate;
+ } else {
+ req_q_size = req->length * sizeof(request_t);
+ rsp_q_size = rsp->length * sizeof(response_t);
+ dump_size = offsetof(struct qla2xxx_fw_dump, isp);
+ dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
+ + eft_size;
+ ha->chain_offset = dump_size;
+ dump_size += mq_size + fce_size;
+ if (ha->exchoffld_buf)
+ dump_size += sizeof(struct qla2xxx_offld_chain) +
+ ha->exchoffld_size;
+ if (ha->exlogin_buf)
+ dump_size += sizeof(struct qla2xxx_offld_chain) +
+ ha->exlogin_size;
}
- req_q_size = req->length * sizeof(request_t);
- rsp_q_size = rsp->length * sizeof(response_t);
- dump_size = offsetof(struct qla2xxx_fw_dump, isp);
- dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
- ha->chain_offset = dump_size;
- dump_size += mq_size + fce_size;
-
- if (ha->exchoffld_buf)
- dump_size += sizeof(struct qla2xxx_offld_chain) +
- ha->exchoffld_size;
- if (ha->exlogin_buf)
- dump_size += sizeof(struct qla2xxx_offld_chain) +
- ha->exlogin_size;
-
-allocate:
if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
ql_dbg(ql_dbg_init, vha, 0x00c5,
@@ -4400,7 +4333,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
inline void
qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
- char *def)
+ const char *def)
{
char *st, *en;
uint16_t index;
@@ -4412,7 +4345,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
if (len > sizeof(zero))
len = sizeof(zero);
if (memcmp(model, &zero, len) != 0) {
- strncpy(ha->model_number, model, len);
+ memcpy(ha->model_number, model, len);
st = en = ha->model_number;
en += len - 1;
while (en > st) {
@@ -4425,21 +4358,23 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
if (use_tbl &&
ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES)
- strncpy(ha->model_desc,
+ strlcpy(ha->model_desc,
qla2x00_model_name[index * 2 + 1],
- sizeof(ha->model_desc) - 1);
+ sizeof(ha->model_desc));
} else {
index = (ha->pdev->subsystem_device & 0xff);
if (use_tbl &&
ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
index < QLA_MODEL_NAMES) {
- strcpy(ha->model_number,
- qla2x00_model_name[index * 2]);
- strncpy(ha->model_desc,
+ strlcpy(ha->model_number,
+ qla2x00_model_name[index * 2],
+ sizeof(ha->model_number));
+ strlcpy(ha->model_desc,
qla2x00_model_name[index * 2 + 1],
- sizeof(ha->model_desc) - 1);
+ sizeof(ha->model_desc));
} else {
- strcpy(ha->model_number, def);
+ strlcpy(ha->model_number, def,
+ sizeof(ha->model_number));
}
}
if (IS_FWI2_CAPABLE(ha))
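
The strncpy() replacements above matter because strncpy() does not NUL-terminate the destination when the source is at least as long as the limit, while strlcpy() always terminates and takes the full destination size, which is why the sizeof(...) - 1 arguments become plain sizeof(...). A userspace model (strlcpy() is a kernel/BSD API, so a minimal stand-in is defined here just for the demo):

#include <stdio.h>
#include <string.h>

/* Minimal model of the kernel's strlcpy(): copy at most size - 1 bytes
 * and always NUL-terminate, unlike strncpy(). */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[4], b[4];

	strncpy(a, "QLE2462", sizeof(a));	/* a holds "QLE2", unterminated */
	my_strlcpy(b, "QLE2462", sizeof(b));	/* b holds "QLE\0" */
	printf("%.4s %s\n", a, b);
	return 0;
}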
@@ -5044,7 +4979,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
uint16_t index;
uint16_t entries;
- char *id_iter;
+ struct gid_list_info *gid;
uint16_t loop_id;
uint8_t domain, area, al_pa;
struct qla_hw_data *ha = vha->hw;
@@ -5119,18 +5054,16 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
/* Add devices to port list. */
- id_iter = (char *)ha->gid_list;
+ gid = ha->gid_list;
for (index = 0; index < entries; index++) {
- domain = ((struct gid_list_info *)id_iter)->domain;
- area = ((struct gid_list_info *)id_iter)->area;
- al_pa = ((struct gid_list_info *)id_iter)->al_pa;
+ domain = gid->domain;
+ area = gid->area;
+ al_pa = gid->al_pa;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
- loop_id = (uint16_t)
- ((struct gid_list_info *)id_iter)->loop_id_2100;
+ loop_id = gid->loop_id_2100;
else
- loop_id = le16_to_cpu(
- ((struct gid_list_info *)id_iter)->loop_id);
- id_iter += ha->gid_list_info_size;
+ loop_id = le16_to_cpu(gid->loop_id);
+ gid = (void *)gid + ha->gid_list_info_size;
/* Bypass reserved domain fields. */
if ((domain & 0xf0) == 0xf0)
@@ -5355,7 +5288,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"%s %8phN. rport %p is %s mode\n",
__func__, fcport->port_name, rport,
(fcport->port_type == FCT_TARGET) ? "tgt" :
- ((fcport->port_type & FCT_NVME) ? "nvme" :"ini"));
+ ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
fc_remote_port_rolechg(rport, rport_ids.roles);
}
@@ -6596,7 +6529,8 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha)
LOOP_DOWN_TIME);
}
/* Wait for pending cmds to complete */
- qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST);
+ WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
+ != QLA_SUCCESS);
}
void
@@ -6684,8 +6618,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
/* Clear all async request states across all VPs. */
- list_for_each_entry(fcport, &vha->vp_fcports, list)
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ fcport->scan_state = 0;
+ }
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
atomic_inc(&vp->vref_count);
@@ -7519,8 +7455,12 @@ qla27xx_get_active_image(struct scsi_qla_host *vha,
goto check_sec_image;
}
- qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
- ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2);
+ if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
+ ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
+ QLA_SUCCESS) {
+ WARN_ON_ONCE(true);
+ goto check_sec_image;
+ }
qla27xx_print_image(vha, "Primary image", &pri_image_status);
if (qla27xx_check_image_status_signature(&pri_image_status)) {
@@ -8274,7 +8214,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
"primary" : "secondary");
}
- qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2);
+ ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
/* Get NVRAM data into cache and calculate checksum. */
faddr = ha->flt_region_nvram;
@@ -8286,7 +8226,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
"Loading %s nvram image.\n",
active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
"primary" : "secondary");
- qla24xx_read_flash_data(vha, ha->nvram, faddr, ha->nvram_size >> 2);
+ ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
dptr = (uint32_t *)nv;
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index bf063c664352..0c3d907af769 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -152,6 +152,18 @@ qla2x00_chip_is_down(scsi_qla_host_t *vha)
return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}
+static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
+ struct qla_qpair *qpair, fc_port_t *fcport)
+{
+ memset(sp, 0, sizeof(*sp));
+ sp->fcport = fcport;
+ sp->iocbs = 1;
+ sp->vha = vha;
+ sp->qpair = qpair;
+ sp->cmd_type = TYPE_SRB;
+ INIT_LIST_HEAD(&sp->elem);
+}
+
static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
fc_port_t *fcport, gfp_t flag)
@@ -164,19 +176,9 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
return NULL;
sp = mempool_alloc(qpair->srb_mempool, flag);
- if (!sp)
- goto done;
-
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->iocbs = 1;
- sp->vha = vha;
- sp->qpair = qpair;
- sp->cmd_type = TYPE_SRB;
- INIT_LIST_HEAD(&sp->elem);
-
-done:
- if (!sp)
+ if (sp)
+ qla2xxx_init_sp(sp, vha, qpair, fcport);
+ else
QLA_QPAIR_MARK_NOT_BUSY(qpair);
return sp;
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 9312b19ed708..e92e52aa6e9b 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -292,6 +292,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
}
}
+/*
+ * Find the first handle that is not in use, starting from
+ * req->current_outstanding_cmd + 1. The caller must hold the lock that is
+ * associated with @req.
+ */
+uint32_t qla2xxx_get_next_handle(struct req_que *req)
+{
+ uint32_t index, handle = req->current_outstanding_cmd;
+
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ return handle;
+ }
+
+ return 0;
+}
+
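
Handle 0 is reserved as the "no free slot" return value, so the scan starts one past current_outstanding_cmd and wraps from num_outstanding_cmds back to 1; the index counter caps the walk at num_outstanding_cmds - 1 probes, so every slot except 0 is visited exactly once. A standalone userspace model of the walk (illustration only, not driver code):

#include <assert.h>
#include <stdint.h>

struct fake_req {
	uint32_t current_outstanding_cmd;
	uint32_t num_outstanding_cmds;
	void *outstanding_cmds[8];
};

static uint32_t next_handle(struct fake_req *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;	/* every slot 1..N-1 busy: no free handle */
}

int main(void)
{
	static int busy;
	struct fake_req req = {
		.current_outstanding_cmd = 6,
		.num_outstanding_cmds = 8,
	};

	req.outstanding_cmds[7] = &busy;	/* slot 7 in use */
	assert(next_handle(&req) == 1);		/* wraps past 7 to slot 1 */
	return 0;
}

This one helper replaces each open-coded copy of the same loop in the start_scsi and alloc_iocbs paths further down.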
/**
* qla2x00_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
@@ -306,7 +326,6 @@ qla2x00_start_scsi(srb_t *sp)
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
cmd_entry_t *cmd_pkt;
uint16_t cnt;
@@ -339,16 +358,8 @@ qla2x00_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -610,7 +621,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
}
cur_seg = scsi_sglist(cmd);
- ctx = GET_CMD_CTX_SP(sp);
+ ctx = sp->u.scmd.ct6_ctx;
while (tot_dsds) {
avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -943,8 +954,7 @@ alloc_and_fill:
if (sp) {
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)
- sp->u.scmd.ctx)->dsd_list);
+ &sp->u.scmd.crc_ctx->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
} else {
@@ -1041,8 +1051,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
if (sp) {
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)
- sp->u.scmd.ctx)->dsd_list);
+ &sp->u.scmd.crc_ctx->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
} else {
@@ -1088,7 +1097,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
sgl = scsi_prot_sglist(cmd);
vha = sp->vha;
- difctx = sp->u.scmd.ctx;
+ difctx = sp->u.scmd.crc_ctx;
direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
"%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
@@ -1364,6 +1373,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
cur_dsd++;
return 0;
}
+
/**
* qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
* Type 6 IOCB types.
@@ -1427,7 +1437,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
bundling = 0;
/* Allocate CRC context from global pool */
- crc_ctx_pkt = sp->u.scmd.ctx =
+ crc_ctx_pkt = sp->u.scmd.crc_ctx =
dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
if (!crc_ctx_pkt)
@@ -1515,7 +1525,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
}
if (!bundling) {
- cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
+ cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
} else {
/*
* Configure Bundling if we need to fetch interleaving
* protection PCI accesses.
*/
@@ -1525,7 +1535,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
tot_prot_dsds);
- cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
+ cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
}
/* Finish the common fields of CRC pkt */
@@ -1583,7 +1593,6 @@ qla24xx_start_scsi(srb_t *sp)
int nseg;
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
struct cmd_type_7 *cmd_pkt;
uint16_t cnt;
@@ -1611,16 +1620,8 @@ qla24xx_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1723,7 +1724,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
int nseg;
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
uint16_t cnt;
uint16_t req_cnt = 0;
@@ -1764,17 +1764,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
-
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Compute number of required data segments */
@@ -1919,7 +1910,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
int nseg;
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
struct cmd_type_7 *cmd_pkt;
uint16_t cnt;
@@ -1950,16 +1940,8 @@ qla2xxx_start_scsi_mq(srb_t *sp)
vha->marker_needed = 0;
}
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -2063,7 +2045,6 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
int nseg;
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
uint16_t cnt;
uint16_t req_cnt = 0;
@@ -2118,17 +2099,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
vha->marker_needed = 0;
}
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
-
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Compute number of required data segments */
@@ -2275,7 +2247,7 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = qpair->req;
device_reg_t *reg = ISP_QUE_REG(ha, req->id);
- uint32_t index, handle;
+ uint32_t handle;
request_t *pkt;
uint16_t cnt, req_cnt;
@@ -2315,16 +2287,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
goto queuing_error;
if (sp) {
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds) {
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0) {
ql_log(ql_log_warn, vha, 0x700b,
"No room on outstanding cmd array.\n");
goto queuing_error;
@@ -2540,13 +2504,11 @@ void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->free = qla2x00_sp_free;
if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
- add_timer(&sp->u.iocb_cmd.timer);
+ sp->start_timer = 1;
}
-static void
-qla2x00_els_dcmd_sp_free(void *data)
+static void qla2x00_els_dcmd_sp_free(srb_t *sp)
{
- srb_t *sp = data;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
kfree(sp->fcport);
@@ -2576,10 +2538,8 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
complete(&lio->u.els_logo.comp);
}
-static void
-qla2x00_els_dcmd_sp_done(void *ptr, int res)
+static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
@@ -2699,16 +2659,16 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->s_id[0] = vha->d_id.b.al_pa;
els_iocb->s_id[1] = vha->d_id.b.area;
els_iocb->s_id[2] = vha->d_id.b.domain;
- els_iocb->control_flags = 0;
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
+ els_iocb->control_flags = 0;
els_iocb->tx_byte_count = els_iocb->tx_len =
- sizeof(struct els_plogi_payload);
+ cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
&els_iocb->tx_address);
els_iocb->rx_dsd_count = 1;
els_iocb->rx_byte_count = els_iocb->rx_len =
- sizeof(struct els_plogi_payload);
+ cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
&els_iocb->rx_address);
@@ -2717,7 +2677,9 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
ql_dump_buffer(ql_log_info, vha, 0x0109,
(uint8_t *)els_iocb, 0x70);
} else {
- els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
+ els_iocb->control_flags = 1 << 13;
+ els_iocb->tx_byte_count =
+ cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
&els_iocb->tx_address);
els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
@@ -2755,10 +2717,23 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
sp->done(sp, QLA_FUNCTION_TIMEOUT);
}
-static void
-qla2x00_els_dcmd2_sp_done(void *ptr, int res)
+void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
+{
+ if (els_plogi->els_plogi_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ els_plogi->tx_size,
+ els_plogi->els_plogi_pyld,
+ els_plogi->els_plogi_pyld_dma);
+
+ if (els_plogi->els_resp_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ els_plogi->rx_size,
+ els_plogi->els_resp_pyld,
+ els_plogi->els_resp_pyld_dma);
+}
+
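
The duplicated free sequence from the done path and the setup error path (both replaced further down) now lives in this one helper. Its NULL checks are load-bearing: dma_free_coherent(), unlike kfree(), is not specified to accept a NULL address. A hardened variant (sketch, not in the patch) would also clear the pointer so a second call stays harmless:

/* Hypothetical helper, not a driver symbol. */
static void demo_free_coherent_pair(struct device *dev, size_t size,
				    void **cpu_addr, dma_addr_t dma_addr)
{
	if (*cpu_addr) {	/* dma_free_coherent() is not NULL-safe */
		dma_free_coherent(dev, size, *cpu_addr, dma_addr);
		*cpu_addr = NULL;
	}
}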
+static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
@@ -2780,26 +2755,16 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
} else {
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
- ea.rc = res;
- ea.event = FCME_ELS_PLOGI_DONE;
- qla2x00_fcport_event_handler(vha, &ea);
+ ea.data[0] = MBS_COMMAND_COMPLETE;
+ ea.sp = sp;
+ qla24xx_handle_plogi_done_event(vha, &ea);
}
e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
if (!e) {
struct srb_iocb *elsio = &sp->u.iocb_cmd;
- if (elsio->u.els_plogi.els_plogi_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev,
- elsio->u.els_plogi.tx_size,
- elsio->u.els_plogi.els_plogi_pyld,
- elsio->u.els_plogi.els_plogi_pyld_dma);
-
- if (elsio->u.els_plogi.els_resp_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev,
- elsio->u.els_plogi.rx_size,
- elsio->u.els_plogi.els_resp_pyld,
- elsio->u.els_plogi.els_resp_pyld_dma);
+ qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
sp->free(sp);
return;
}
@@ -2899,18 +2864,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
out:
fcport->flags &= ~(FCF_ASYNC_SENT);
- if (elsio->u.els_plogi.els_plogi_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev,
- elsio->u.els_plogi.tx_size,
- elsio->u.els_plogi.els_plogi_pyld,
- elsio->u.els_plogi.els_plogi_pyld_dma);
-
- if (elsio->u.els_plogi.els_resp_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev,
- elsio->u.els_plogi.rx_size,
- elsio->u.els_plogi.els_resp_pyld,
- elsio->u.els_plogi.els_resp_pyld_dma);
-
+ qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
sp->free(sp);
done:
return rval;
@@ -3115,7 +3069,6 @@ qla82xx_start_scsi(srb_t *sp)
unsigned long flags;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
uint16_t cnt;
uint16_t req_cnt;
@@ -3155,16 +3108,8 @@ qla82xx_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -3235,7 +3180,7 @@ sufficient_dsds:
goto queuing_error;
}
- ctx = sp->u.scmd.ctx =
+ ctx = sp->u.scmd.ct6_ctx =
mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!ctx) {
ql_log(ql_log_fatal, vha, 0x3010,
@@ -3431,9 +3376,9 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- if (sp->u.scmd.ctx) {
- mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
- sp->u.scmd.ctx = NULL;
+ if (sp->u.scmd.crc_ctx) {
+ mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+ sp->u.scmd.crc_ctx = NULL;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3668,6 +3613,9 @@ qla2x00_start_sp(srb_t *sp)
break;
}
+ if (sp->start_timer)
+ add_timer(&sp->u.iocb_cmd.timer);
+
wmb();
qla2x00_start_iocbs(vha, qp->req);
done:
@@ -3769,7 +3717,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
uint32_t handle;
- uint32_t index;
uint16_t req_cnt;
uint16_t cnt;
uint32_t *clr_ptr;
@@ -3794,17 +3741,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
-
- if (index == req->num_outstanding_cmds) {
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0) {
rval = EXT_STATUS_BUSY;
goto queuing_error;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 78aec50abe0f..4c26630c1c3e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -776,7 +776,6 @@ skip_rio:
case MBA_LOOP_INIT_ERR:
ql_log(ql_log_warn, vha, 0x5090,
"LOOP INIT ERROR (%x).\n", mb[1]);
- ha->isp_ops->fw_dump(vha, 1);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
@@ -1119,10 +1118,9 @@ global_port_update:
struct event_arg ea;
memset(&ea, 0, sizeof(ea));
- ea.event = FCME_RSCN;
ea.id.b24 = rscn_entry;
ea.id.b.rsvd_1 = rscn_entry >> 24;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla2x00_handle_rscn(vha, &ea);
qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
}
break;
@@ -1514,7 +1512,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
if (comp_status == CS_DATA_UNDERRUN) {
res = DID_OK << 16;
bsg_reply->reply_payload_rcv_len =
- le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+ le16_to_cpu(pkt->rsp_info_len);
ql_log(ql_log_warn, vha, 0x5048,
"CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
@@ -2257,11 +2255,8 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
struct bsg_job *bsg_job = NULL;
struct fc_bsg_request *bsg_request;
struct fc_bsg_reply *bsg_reply;
- sts_entry_t *sts;
- struct sts_entry_24xx *sts24;
-
- sts = (sts_entry_t *) pkt;
- sts24 = (struct sts_entry_24xx *) pkt;
+ sts_entry_t *sts = pkt;
+ struct sts_entry_24xx *sts24 = pkt;
/* Validate handle. */
if (index >= req->num_outstanding_cmds) {
@@ -2407,8 +2402,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
srb_t *sp;
fc_port_t *fcport;
struct scsi_cmnd *cp;
- sts_entry_t *sts;
- struct sts_entry_24xx *sts24;
+ sts_entry_t *sts = pkt;
+ struct sts_entry_24xx *sts24 = pkt;
uint16_t comp_status;
uint16_t scsi_status;
uint16_t ox_id;
@@ -2426,8 +2421,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
uint16_t state_flags = 0;
uint16_t retry_delay = 0;
- sts = (sts_entry_t *) pkt;
- sts24 = (struct sts_entry_24xx *) pkt;
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -2727,7 +2720,7 @@ check_scsi_status:
"Port to be marked lost on fcport=%02x%02x%02x, current "
"port state= %s comp_status %x.\n", fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
- port_state_str[atomic_read(&fcport->state)],
+ port_state_str[FCS_ONLINE],
comp_status);
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -2844,6 +2837,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
if (sense_len == 0) {
rsp->status_srb = NULL;
sp->done(sp, cp->result);
+ } else {
+ WARN_ON_ONCE(true);
}
}
@@ -3471,10 +3466,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
ha->msix_count, ret);
goto msix_out;
} else if (ret < ha->msix_count) {
- ql_log(ql_log_warn, vha, 0x00c6,
- "MSI-X: Failed to enable support "
- "with %d vectors, using %d vectors.\n",
- ha->msix_count, ret);
+ ql_log(ql_log_info, vha, 0x00c6,
+ "MSI-X: Using %d vectors\n", ret);
ha->msix_count = ret;
/* Recalculate queue values */
if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 133f5f6270ff..4c858e2d0ea8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -253,21 +253,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- if (IS_P3P_TYPE(ha)) {
- if (RD_REG_DWORD(&reg->isp82.hint) &
- HINT_MBX_INT_PENDING) {
- ha->flags.mbox_busy = 0;
- spin_unlock_irqrestore(&ha->hardware_lock,
- flags);
-
- atomic_dec(&ha->num_pend_mbx_stage2);
- ql_dbg(ql_dbg_mbx, vha, 0x1010,
- "Pending mailbox timeout, exiting.\n");
- rval = QLA_FUNCTION_TIMEOUT;
- goto premature_exit;
- }
+ if (IS_P3P_TYPE(ha))
WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
- } else if (IS_FWI2_CAPABLE(ha))
+ else if (IS_FWI2_CAPABLE(ha))
WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
else
WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
@@ -394,8 +382,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
goto premature_exit;
}
- if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
+ if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x11ff,
+ "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
+ MBS_COMMAND_COMPLETE);
rval = QLA_FUNCTION_FAILED;
+ }
/* Load return mailbox registers. */
iptr2 = mcp->mb;
@@ -6213,10 +6205,8 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
-static void qla2x00_async_mb_sp_done(void *s, int res)
+static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
-
sp->u.iocb_cmd.u.mbx.rc = res;
complete(&sp->u.iocb_cmd.u.mbx.comp);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index b2977e49356b..1a9a11ae7285 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -901,10 +901,8 @@ failed:
return 0;
}
-static void qla_ctrlvp_sp_done(void *s, int res)
+static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
- struct srb *sp = s;
-
if (sp->comp)
complete(sp->comp);
/* don't free sp here. Let the caller do the free */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 942ee13b96a4..605b59c76c90 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,7 +10,6 @@
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
-#include <linux/bsg-lib.h>
#include <scsi/scsi_tcq.h>
#include <linux/utsname.h>
@@ -149,7 +148,8 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+ WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp,
+ mcp->tov * HZ) != 0);
} else {
ql_dbg(ql_dbg_mbx, vha, 0x112c,
"Cmd=%x Polling Mode.\n", command);
@@ -688,14 +688,12 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
}
char *
-qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
struct qla_hw_data *ha = vha->hw;
- if (pci_is_pcie(ha->pdev)) {
- strcpy(str, "PCIe iSA");
- return str;
- }
+ if (pci_is_pcie(ha->pdev))
+ strlcpy(str, "PCIe iSA", str_len);
return str;
}
@@ -1799,10 +1797,8 @@ qla2x00_fxdisc_iocb_timeout(void *data)
complete(&lio->u.fxiocb.fxiocb_comp);
}
-static void
-qla2x00_fxdisc_sp_done(void *ptr, int res)
+static void qla2x00_fxdisc_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
complete(&lio->u.fxiocb.fxiocb_comp);
@@ -1881,22 +1877,22 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
phost_info = &preg_hsi->hsi;
memset(preg_hsi, 0, sizeof(struct register_host_info));
phost_info->os_type = OS_TYPE_LINUX;
- strncpy(phost_info->sysname,
- p_sysid->sysname, SYSNAME_LENGTH);
- strncpy(phost_info->nodename,
- p_sysid->nodename, NODENAME_LENGTH);
+ strlcpy(phost_info->sysname, p_sysid->sysname,
+ sizeof(phost_info->sysname));
+ strlcpy(phost_info->nodename, p_sysid->nodename,
+ sizeof(phost_info->nodename));
if (!strcmp(phost_info->nodename, "(none)"))
ha->mr.host_info_resend = true;
- strncpy(phost_info->release,
- p_sysid->release, RELEASE_LENGTH);
- strncpy(phost_info->version,
- p_sysid->version, VERSION_LENGTH);
- strncpy(phost_info->machine,
- p_sysid->machine, MACHINE_LENGTH);
- strncpy(phost_info->domainname,
- p_sysid->domainname, DOMNAME_LENGTH);
- strncpy(phost_info->hostdriver,
- QLA2XXX_VERSION, VERSION_LENGTH);
+ strlcpy(phost_info->release, p_sysid->release,
+ sizeof(phost_info->release));
+ strlcpy(phost_info->version, p_sysid->version,
+ sizeof(phost_info->version));
+ strlcpy(phost_info->machine, p_sysid->machine,
+ sizeof(phost_info->machine));
+ strlcpy(phost_info->domainname, p_sysid->domainname,
+ sizeof(phost_info->domainname));
+ strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
+ sizeof(phost_info->hostdriver));
preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
ql_dbg(ql_dbg_init, vha, 0x0149,
"ISP%04X: Host registration with firmware\n",
@@ -1941,8 +1937,10 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
if (fx_type == FXDISC_GET_CONFIG_INFO) {
struct config_info_data *pinfo =
(struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
- strcpy(vha->hw->model_number, pinfo->model_num);
- strcpy(vha->hw->model_desc, pinfo->model_description);
+ strlcpy(vha->hw->model_number, pinfo->model_num,
+ ARRAY_SIZE(vha->hw->model_number));
+ strlcpy(vha->hw->model_desc, pinfo->model_description,
+ ARRAY_SIZE(vha->hw->model_desc));
memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
sizeof(vha->hw->mr.symbolic_name));
memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
@@ -2541,6 +2539,8 @@ check_scsi_status:
if (rsp->status_srb == NULL)
sp->done(sp, res);
+ else
+ WARN_ON_ONCE(true);
}
/**
@@ -2618,6 +2618,8 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
if (sense_len == 0) {
rsp->status_srb = NULL;
sp->done(sp, cp->result);
+ } else {
+ WARN_ON_ONCE(true);
}
}
@@ -3073,7 +3075,6 @@ qlafx00_start_scsi(srb_t *sp)
{
int nseg;
unsigned long flags;
- uint32_t index;
uint32_t handle;
uint16_t cnt;
uint16_t req_cnt;
@@ -3097,16 +3098,8 @@ qlafx00_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 963094b3c300..6cc19e060afc 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -180,10 +180,9 @@ static void qla_nvme_ls_complete(struct work_struct *work)
kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}
-static void qla_nvme_sp_ls_done(void *ptr, int res)
+static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
- struct nvme_private *priv;
+ struct nvme_private *priv = sp->priv;
if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
return;
@@ -191,17 +190,15 @@ static void qla_nvme_sp_ls_done(void *ptr, int res)
if (res)
res = -EINVAL;
- priv = (struct nvme_private *)sp->priv;
priv->comp_status = res;
INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
schedule_work(&priv->ls_work);
}
/* it assumed that QPair lock is held. */
-static void qla_nvme_sp_done(void *ptr, int res)
+static void qla_nvme_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
- struct nvme_private *priv = (struct nvme_private *)sp->priv;
+ struct nvme_private *priv = sp->priv;
priv->comp_status = res;
kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
@@ -222,7 +219,7 @@ static void qla_nvme_abort_work(struct work_struct *work)
"%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
__func__, sp, sp->handle, fcport, fcport->deleted);
- if (!ha->flags.fw_started && (fcport && fcport->deleted))
+ if (!ha->flags.fw_started && fcport->deleted)
goto out;
if (ha->flags.host_shutting_down) {
@@ -267,7 +264,6 @@ static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
schedule_work(&priv->abort_work);
}
-
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
@@ -357,7 +353,6 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
struct cmd_nvme *cmd_pkt;
uint16_t cnt, i;
@@ -381,17 +376,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
/* Acquire qpair specific lock */
spin_lock_irqsave(&qpair->qp_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
-
- if (index == req->num_outstanding_cmds) {
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0) {
rval = -EBUSY;
goto queuing_error;
}
@@ -653,7 +639,9 @@ void qla_nvme_unregister_remote_port(struct fc_port *fcport)
"%s: unregister remoteport on %p %8phN\n",
__func__, fcport, fcport->port_name);
- nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+ if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
+
init_completion(&fcport->nvme_del_done);
ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
if (ret)
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index 67bb4a2a3742..ef912902d4e5 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -7,7 +7,6 @@
#ifndef __QLA_NVME_H
#define __QLA_NVME_H
-#include <linux/blk-mq.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/nvme-fc-driver.h>
@@ -119,7 +118,7 @@ struct pt_ls4_rx_unsol {
uint32_t exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
- uint8_t s_id[3];
+ be_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
@@ -144,5 +143,5 @@ int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *);
void qla_nvme_delete(struct scsi_qla_host *);
void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *,
struct req_que *);
-void qla24xx_async_gffid_sp_done(void *, int);
+void qla24xx_async_gffid_sp_done(struct srb *sp, int);
#endif
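
Replacing the raw uint8_t s_id[3] with be_id_t gives the three wire-order bytes of the FC address names instead of indices. The layout below is assumed from how the driver uses it; the authoritative definition is the one this series adds in qla_def.h:

#include <stdint.h>

/* Assumed field order (wire order: domain, area, al_pa); demo_ prefix
 * marks this as a stand-in, not the real type. */
typedef struct {
	uint8_t domain;
	uint8_t area;
	uint8_t al_pa;
} demo_be_id_t;

/* The 24-bit FC_ID then reads by name instead of by magic index: */
static inline uint32_t demo_be_id_to_u24(demo_be_id_t id)
{
	return ((uint32_t)id.domain << 16) | ((uint32_t)id.area << 8) |
	       id.al_pa;
}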
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index c760ae354174..2b2028f2383e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1977,7 +1977,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
} while (--retries);
ql_log(ql_log_fatal, vha, 0x00ac,
- "Rcv Peg initializatin failed: 0x%x.\n", val);
+ "Rcv Peg initialization failed: 0x%x.\n", val);
read_lock(&ha->hw_lock);
qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
read_unlock(&ha->hw_lock);
@@ -1985,7 +1985,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
}
/* ISR related functions */
-static struct qla82xx_legacy_intr_set legacy_intr[] = \
+static struct qla82xx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
/*
@@ -2287,7 +2287,9 @@ qla82xx_disable_intrs(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- qla82xx_mbx_intr_disable(vha);
+ if (ha->interrupts_on)
+ qla82xx_mbx_intr_disable(vha);
+
spin_lock_irq(&ha->hardware_lock);
if (IS_QLA8044(ha))
qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
@@ -3286,7 +3288,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
case QLA8XXX_DEV_NEED_QUIESCENT:
qla82xx_need_qsnt_handler(vha);
/* Reset timeout value after quiescence handler */
- dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
* HZ);
break;
case QLA8XXX_DEV_QUIESCENT:
@@ -3301,7 +3303,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
qla82xx_idc_lock(ha);
/* Reset timeout value after quiescence handler */
- dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout
* HZ);
break;
case QLA8XXX_DEV_FAILED:
@@ -3686,7 +3688,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- if ((!sp->u.scmd.ctx ||
+ if ((!sp->u.scmd.crc_ctx ||
(sp->flags &
SRB_FCP_CMND_DMA_VALID)) &&
!ha->flags.isp82xx_fw_hung) {
@@ -3710,10 +3712,12 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
/* Wait for pending cmds (physical and virtual) to complete */
if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
- WAIT_HOST)) {
+ WAIT_HOST) == QLA_SUCCESS) {
ql_dbg(ql_dbg_init, vha, 0x00b3,
"Done wait for "
"pending commands.\n");
+ } else {
+ WARN_ON_ONCE(true);
}
}
}
@@ -4232,7 +4236,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
- entry_hdr = (qla82xx_md_entry_hdr_t *) \
+ entry_hdr = (qla82xx_md_entry_hdr_t *)
(((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
/* Walk through the entry headers */
@@ -4339,7 +4343,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
data_collected = (uint8_t *)data_ptr -
(uint8_t *)ha->md_dump;
skip_nxt_entry:
- entry_hdr = (qla82xx_md_entry_hdr_t *) \
+ entry_hdr = (qla82xx_md_entry_hdr_t *)
(((uint8_t *)entry_hdr) + entry_hdr->entry_size);
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 3c7beef92c35..230abee10598 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -486,13 +486,13 @@
#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
-#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
-#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000
-#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000
-#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff
-#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000
-#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000
-#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff
+#define QLA82XX_PCI_CRBSPACE 0x06000000UL
+#define QLA82XX_PCI_DIRECT_CRB 0x04400000UL
+#define QLA82XX_PCI_CAMQM 0x04800000UL
+#define QLA82XX_PCI_CAMQM_MAX 0x04ffffffUL
+#define QLA82XX_PCI_DDR_NET 0x00000000UL
+#define QLA82XX_PCI_QDR_NET 0x04000000UL
+#define QLA82XX_PCI_QDR_NET_MAX 0x043fffffUL
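
The UL-suffix rewrite of these address macros changes nothing at runtime: both spellings are unsigned long constants with the same value, and the suffix form says so in the literal itself instead of through a cast. A quick compile-time check (hypothetical, not part of the tree):

#include <stdio.h>

#define OLD_CRBSPACE (unsigned long)0x06000000	/* cast style (before) */
#define NEW_CRBSPACE 0x06000000UL		/* suffix style (after) */

int main(void)
{
	_Static_assert(OLD_CRBSPACE == NEW_CRBSPACE, "same value");
	_Static_assert(sizeof(NEW_CRBSPACE) == sizeof(unsigned long),
		       "same type width");
	printf("%lu\n", NEW_CRBSPACE);
	return 0;
}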
/*
* Register offsets for MN
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 369ac04d0454..c056f466f1f4 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -2810,7 +2810,7 @@ error:
#define ISP8044_PEX_DMA_ENGINE_INDEX 8
#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000
-#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000
+#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000UL
#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0
#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04
#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 98e60a34afd9..73db01e3b4e4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -69,7 +69,7 @@ MODULE_PARM_DESC(ql2xplogiabsentdevice,
"a Fabric scan. This is needed for several broken switches. "
"Default is 0 - no PLOGI. 1 - perform PLOGI.");
-int ql2xloginretrycount = 0;
+int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
"Specify an alternate value for the NVRAM login retry count.");
@@ -234,7 +234,7 @@ MODULE_PARM_DESC(ql2xmdenable,
"0 - MiniDump disabled. "
"1 (Default) - MiniDump enabled.");
-int ql2xexlogins = 0;
+int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
"Number of extended Logins. "
@@ -250,7 +250,7 @@ module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
"Number of initiator exchanges.");
-int ql2xfwholdabts = 0;
+int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
"Allow FW to hold status IOCB until ABTS rsp received. "
@@ -536,80 +536,70 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
}
static char *
-qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
+qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
struct qla_hw_data *ha = vha->hw;
- static char *pci_bus_modes[] = {
+ static const char *const pci_bus_modes[] = {
"33", "66", "100", "133",
};
uint16_t pci_bus;
- strcpy(str, "PCI");
pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
if (pci_bus) {
- strcat(str, "-X (");
- strcat(str, pci_bus_modes[pci_bus]);
+ snprintf(str, str_len, "PCI-X (%s MHz)",
+ pci_bus_modes[pci_bus]);
} else {
pci_bus = (ha->pci_attr & BIT_8) >> 8;
- strcat(str, " (");
- strcat(str, pci_bus_modes[pci_bus]);
+ snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
}
- strcat(str, " MHz)");
- return (str);
+ return str;
}
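
The rewrite folds a chain of unbounded strcpy()/strcat() calls into one bounded snprintf() per branch, which is why pci_info_str() grows a str_len parameter all the way up to the probe-time caller. A userspace sketch of the resulting shape (names illustrative):

#include <stdio.h>

static const char *const pci_bus_modes[] = { "33", "66", "100", "133" };

/* "After" shape: one bounded snprintf() per branch; the destination
 * length travels with the destination. */
static char *pci_info_str(char *str, size_t str_len, unsigned int pci_bus)
{
	if (pci_bus)
		snprintf(str, str_len, "PCI-X (%s MHz)",
			 pci_bus_modes[pci_bus & 3]);
	else
		snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[0]);
	return str;
}

int main(void)
{
	char buf[16];

	puts(pci_info_str(buf, sizeof(buf), 2));	/* PCI-X (100 MHz) */
	return 0;
}

The qla24xx variant below gets the same treatment, with a speed_str lookup replacing the repeated strcat() calls.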
static char *
-qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
+qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
- static char *pci_bus_modes[] = { "33", "66", "100", "133", };
+ static const char *const pci_bus_modes[] = {
+ "33", "66", "100", "133",
+ };
struct qla_hw_data *ha = vha->hw;
uint32_t pci_bus;
if (pci_is_pcie(ha->pdev)) {
- char lwstr[6];
uint32_t lstat, lspeed, lwidth;
+ const char *speed_str;
pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
lspeed = lstat & PCI_EXP_LNKCAP_SLS;
lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
- strcpy(str, "PCIe (");
switch (lspeed) {
case 1:
- strcat(str, "2.5GT/s ");
+ speed_str = "2.5GT/s";
break;
case 2:
- strcat(str, "5.0GT/s ");
+ speed_str = "5.0GT/s";
break;
case 3:
- strcat(str, "8.0GT/s ");
+ speed_str = "8.0GT/s";
break;
default:
- strcat(str, "<unknown> ");
+ speed_str = "<unknown>";
break;
}
- snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
- strcat(str, lwstr);
+ snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);
return str;
}
- strcpy(str, "PCI");
pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
- if (pci_bus == 0 || pci_bus == 8) {
- strcat(str, " (");
- strcat(str, pci_bus_modes[pci_bus >> 3]);
- } else {
- strcat(str, "-X ");
- if (pci_bus & BIT_2)
- strcat(str, "Mode 2");
- else
- strcat(str, "Mode 1");
- strcat(str, " (");
- strcat(str, pci_bus_modes[pci_bus & ~BIT_2]);
- }
- strcat(str, " MHz)");
+ if (pci_bus == 0 || pci_bus == 8)
+ snprintf(str, str_len, "PCI (%s MHz)",
+ pci_bus_modes[pci_bus >> 3]);
+ else
+ snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
+ pci_bus & 4 ? 2 : 1,
+ pci_bus_modes[pci_bus & 3]);
return str;
}
@@ -662,13 +652,10 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
return str;
}
-void
-qla2x00_sp_free_dma(void *ptr)
+void qla2x00_sp_free_dma(srb_t *sp)
{
- srb_t *sp = ptr;
struct qla_hw_data *ha = sp->vha->hw;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- void *ctx = GET_CMD_CTX_SP(sp);
if (sp->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
@@ -681,24 +668,21 @@ qla2x00_sp_free_dma(void *ptr)
sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
}
- if (!ctx)
- return;
-
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* The list is guaranteed to be non-empty. */
- qla2x00_clean_dsd_pool(ha, ctx);
+ qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
- struct crc_context *ctx0 = ctx;
+ struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = ctx;
+ struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
@@ -709,10 +693,8 @@ qla2x00_sp_free_dma(void *ptr)
}
}
-void
-qla2x00_sp_compl(void *ptr, int res)
+void qla2x00_sp_compl(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
@@ -727,16 +709,12 @@ qla2x00_sp_compl(void *ptr, int res)
cmd->scsi_done(cmd);
if (comp)
complete(comp);
- qla2x00_rel_sp(sp);
}
-void
-qla2xxx_qpair_sp_free_dma(void *ptr)
+void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
- srb_t *sp = (srb_t *)ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct qla_hw_data *ha = sp->fcport->vha->hw;
- void *ctx = GET_CMD_CTX_SP(sp);
if (sp->flags & SRB_DMA_VALID) {
scsi_dma_unmap(cmd);
@@ -749,17 +727,14 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
}
- if (!ctx)
- return;
-
if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
/* The list is guaranteed to be non-empty. */
- qla2x00_clean_dsd_pool(ha, ctx);
+ qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
}
if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
- struct crc_context *difctx = ctx;
+ struct crc_context *difctx = sp->u.scmd.crc_ctx;
struct dsd_dma *dif_dsd, *nxt_dsd;
list_for_each_entry_safe(dif_dsd, nxt_dsd,
@@ -795,7 +770,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = ctx;
+ struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
@@ -807,17 +782,15 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
}
if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
- struct crc_context *ctx0 = ctx;
+ struct crc_context *ctx0 = sp->u.scmd.crc_ctx;
- dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
+ dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
}
-void
-qla2xxx_qpair_sp_compl(void *ptr, int res)
+void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
- srb_t *sp = ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
@@ -832,7 +805,6 @@ qla2xxx_qpair_sp_compl(void *ptr, int res)
cmd->scsi_done(cmd);
if (comp)
complete(comp);
- qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
static int
@@ -845,9 +817,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
srb_t *sp;
int rval;
- struct qla_qpair *qpair = NULL;
- uint32_t tag;
- uint16_t hwq;
if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
WARN_ON_ONCE(!rport)) {
@@ -856,6 +825,10 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
if (ha->mqenable) {
+ uint32_t tag;
+ uint16_t hwq;
+ struct qla_qpair *qpair = NULL;
+
tag = blk_mq_unique_tag(cmd->request);
hwq = blk_mq_unique_tag_to_hwq(tag);
qpair = ha->queue_pair_map[hwq];
@@ -925,9 +898,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
else
goto qc24_target_busy;
- sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
- if (!sp)
- goto qc24_host_busy;
+ sp = scsi_cmd_priv(cmd);
+ qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
@@ -948,9 +920,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
qc24_host_busy_free_sp:
sp->free(sp);
-qc24_host_busy:
- return SCSI_MLQUEUE_HOST_BUSY;
-
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1011,9 +980,8 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
else
goto qc24_target_busy;
- sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
- if (!sp)
- goto qc24_host_busy;
+ sp = scsi_cmd_priv(cmd);
+ qla2xxx_init_sp(sp, vha, qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
@@ -1037,9 +1005,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
qc24_host_busy_free_sp:
sp->free(sp);
-qc24_host_busy:
- return SCSI_MLQUEUE_HOST_BUSY;
-
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1058,8 +1023,8 @@ qc24_fail_command:
* cmd = Scsi Command to wait on.
*
* Return:
- * Not Found : 0
- * Found : 1
+ * Completed in time : QLA_SUCCESS
+ * Did not complete in time : QLA_FUNCTION_FAILED
*/
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
@@ -1269,14 +1234,13 @@ static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ DECLARE_COMPLETION_ONSTACK(comp);
srb_t *sp;
int ret;
unsigned int id;
uint64_t lun;
- unsigned long flags;
int rval;
struct qla_hw_data *ha = vha->hw;
- struct qla_qpair *qpair;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
@@ -1288,28 +1252,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (ret != 0)
return ret;
- sp = (srb_t *) CMD_SP(cmd);
- if (!sp)
- return SUCCESS;
-
- qpair = sp->qpair;
- if (!qpair)
- return SUCCESS;
+ sp = scsi_cmd_priv(cmd);
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- if (sp->type != SRB_SCSI_CMD || GET_CMD_SP(sp) != cmd) {
- /* there's a chance an interrupt could clear
- the ptr as part of done & free */
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ if (sp->fcport && sp->fcport->deleted)
return SUCCESS;
- }
- if (sp_get(sp)){
- /* ref_count is already 0 */
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ /* Return if the command has already finished. */
+ if (sp_get(sp))
return SUCCESS;
- }
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
id = cmd->device->id;
lun = cmd->device->lun;
@@ -1331,6 +1281,23 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
sp->done(sp, DID_ABORT << 16);
ret = SUCCESS;
break;
+ case QLA_FUNCTION_PARAMETER_ERROR: {
+ /* Wait for the command completion. */
+ uint32_t ratov = ha->r_a_tov/10;
+ uint32_t ratov_j = msecs_to_jiffies(4 * ratov * 1000);
+
+ WARN_ON_ONCE(sp->comp);
+ sp->comp = &comp;
+ if (!wait_for_completion_timeout(&comp, ratov_j)) {
+ ql_dbg(ql_dbg_taskm, vha, 0xffff,
+ "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
+ __func__, ha->r_a_tov);
+ ret = FAILED;
+ } else {
+ ret = SUCCESS;
+ }
+ break;
+ }
default:
/*
* Either abort failed or abort and completion raced. Let
@@ -1340,6 +1307,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
break;
}
+ sp->comp = NULL;
+ atomic_dec(&sp->ref_count);
ql_log(ql_log_info, vha, 0x801c,
"Abort command issued nexus=%ld:%d:%llu -- %x.\n",
vha->host_no, id, lun, ret);
@@ -1347,6 +1316,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return ret;
}
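
The new QLA_FUNCTION_PARAMETER_ERROR branch parks the handler on an on-stack completion for up to four R_A_TOV intervals rather than declaring the abort failed outright. The conversion (divide by 10, then scale to milliseconds) implies ha->r_a_tov is kept in tenths of a second; under that assumption, r_a_tov = 100 (a 10-second R_A_TOV) yields a 40-second wait. A standalone check of the arithmetic:

#include <assert.h>
#include <stdint.h>

/* Assumes r_a_tov is stored in tenths of a second, as the
 * ratov = r_a_tov / 10, then 4 * ratov * 1000 ms conversion implies. */
static uint32_t abort_wait_ms(uint32_t r_a_tov)
{
	uint32_t ratov_sec = r_a_tov / 10;

	return 4 * ratov_sec * 1000;
}

int main(void)
{
	assert(abort_wait_ms(100) == 40000);	/* 10 s R_A_TOV -> 40 s wait */
	return 0;
}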
+/*
+ * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
+ */
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
uint64_t l, enum nexus_wait_type type)
@@ -1420,6 +1392,9 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
if (err != 0)
return err;
+ if (fcport->deleted)
+ return SUCCESS;
+
ql_log(ql_log_info, vha, 0x8009,
"%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
cmd->device->id, cmd->device->lun, cmd);
@@ -1534,6 +1509,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
return ret;
ret = FAILED;
+ if (qla2x00_chip_is_down(vha))
+ return ret;
+
ql_log(ql_log_info, vha, 0x8012,
"BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
@@ -1746,6 +1724,8 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
spin_lock_irqsave(qp->qp_lock_ptr, *flags);
sp->comp = NULL;
}
+
+ atomic_dec(&sp->ref_count);
}
static void
@@ -1800,8 +1780,13 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
int que;
struct qla_hw_data *ha = vha->hw;
+ /* Continue only if initialization complete. */
+ if (!ha->base_qpair)
+ return;
__qla2x00_abort_all_cmds(ha->base_qpair, res);
+ if (!ha->queue_pair_map)
+ return;
for (que = 0; que < ha->max_qpairs; que++) {
if (!ha->queue_pair_map[que])
continue;
@@ -2477,7 +2462,7 @@ static struct isp_operations qla27xx_isp_ops = {
.config_rings = qla24xx_config_rings,
.reset_adapter = qla24xx_reset_adapter,
.nvram_config = qla81xx_nvram_config,
- .update_fw_options = qla81xx_update_fw_options,
+ .update_fw_options = qla24xx_update_fw_options,
.load_risc = qla81xx_load_risc,
.pci_info_str = qla24xx_pci_info_str,
.fw_version_str = qla24xx_fw_version_str,
@@ -3154,6 +3139,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_log(ql_log_fatal, base_vha, 0x003d,
"Failed to allocate memory for queue pointers..."
"aborting.\n");
+ ret = -ENODEV;
goto probe_failed;
}
@@ -3418,7 +3404,8 @@ skip_dpc:
"QLogic %s - %s.\n", ha->model_number, ha->model_desc);
ql_log(ql_log_info, base_vha, 0x00fc,
"ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
- pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
+ pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
+ sizeof(pci_info)),
pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
base_vha->host_no,
ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
@@ -4598,6 +4585,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
ha->fce = NULL;
ha->fce_dma = 0;
+ ha->flags.fce_enabled = 0;
ha->eft = NULL;
ha->eft_dma = 0;
ha->fw_dumped = 0;
@@ -4716,7 +4704,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
mempool_destroy(ha->ctx_mempool);
ha->ctx_mempool = NULL;
- if (ql2xenabledif) {
+ if (ql2xenabledif && ha->dif_bundl_pool) {
struct dsd_dma *dsd, *nxt;
list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
@@ -4739,8 +4727,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
}
}
- if (ha->dif_bundl_pool)
- dma_pool_destroy(ha->dif_bundl_pool);
+ dma_pool_destroy(ha->dif_bundl_pool);
ha->dif_bundl_pool = NULL;
qlt_mem_free(ha);
@@ -4812,7 +4799,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
if (!vha->gnl.l) {
ql_log(ql_log_fatal, vha, 0xd04a,
"Alloc failed for name list.\n");
- scsi_remove_host(vha->host);
+ scsi_host_put(vha->host);
return NULL;
}
@@ -4825,7 +4812,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
vha->gnl.l, vha->gnl.ldma);
vha->gnl.l = NULL;
- scsi_remove_host(vha->host);
+ scsi_host_put(vha->host);
return NULL;
}
INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
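Both failure paths above now drop the host with scsi_host_put() rather than scsi_remove_host(): at this point the host has only been allocated, never added, so there is nothing to remove yet. A hedged sketch of the intended pairing; scsi_host_alloc(), scsi_add_host() and scsi_host_put() are the real midlayer API, while struct my_priv and alloc_private_resources() are stand-ins:

    struct Scsi_Host *host = scsi_host_alloc(sht, sizeof(struct my_priv));
    if (!host)
            return NULL;

    if (alloc_private_resources(host) < 0) {        /* hypothetical setup step */
            scsi_host_put(host);    /* never added: just drop the alloc reference */
            return NULL;
    }

    if (scsi_add_host(host, &pdev->dev) < 0) {
            scsi_host_put(host);    /* add failed: still only the alloc reference */
            return NULL;
    }
    /* once added, teardown becomes scsi_remove_host() then scsi_host_put() */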
@@ -5054,8 +5041,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
"%s %8phC mem alloc fail.\n",
__func__, e->u.new_sess.port_name);
- if (pla)
+ if (pla) {
+ list_del(&pla->list);
kmem_cache_free(qla_tgt_plogi_cachep, pla);
+ }
return;
}
@@ -5086,6 +5075,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (fcport) {
fcport->id_changed = 1;
fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
if (pla) {
@@ -5165,8 +5155,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
if (free_fcport) {
qla2x00_free_fcport(fcport);
- if (pla)
+ if (pla) {
+ list_del(&pla->list);
kmem_cache_free(qla_tgt_plogi_cachep, pla);
+ }
}
}
@@ -5346,9 +5338,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
} else {
if (vha->hw->current_topology != ISP_CFG_NL) {
memset(&ea, 0, sizeof(ea));
- ea.event = FCME_RELOGIN;
ea.fcport = fcport;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla24xx_handle_relogin_event(vha, &ea);
} else if (vha->hw->current_topology ==
ISP_CFG_NL) {
fcport->login_retry--;
@@ -5686,7 +5677,6 @@ exit:
void
qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
- uint16_t options = (requester_id << 15) | BIT_6;
uint32_t data;
uint32_t lock_owner;
struct qla_hw_data *ha = base_vha->hw;
@@ -5719,22 +5709,6 @@ retry_lock:
}
return;
-
- /* XXX: IDC-lock implementation using access-control mbx */
-retry_lock2:
- if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
- ql_dbg(ql_dbg_p3p, base_vha, 0xb072,
- "Failed to acquire IDC lock. retrying...\n");
- /* Retry/Perform IDC-Lock recovery */
- if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) {
- qla83xx_wait_logic();
- goto retry_lock2;
- } else
- ql_log(ql_log_warn, base_vha, 0xb076,
- "IDC Lock recovery FAILED.\n");
- }
-
- return;
}
void
@@ -7156,6 +7130,7 @@ struct scsi_host_template qla2xxx_driver_template = {
.supported_mode = MODE_INITIATOR,
.track_queue_depth = 1,
+ .cmd_size = sizeof(srb_t),
};
static const struct pci_error_handlers qla2xxx_err_handler = {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1eb82384d933..f2d5115b2d8d 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -473,22 +473,24 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data)
return QLA_FUNCTION_TIMEOUT;
}
-uint32_t *
+int
qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords)
{
ulong i;
+ int ret = QLA_SUCCESS;
struct qla_hw_data *ha = vha->hw;
/* Dword reads to flash. */
faddr = flash_data_addr(ha, faddr);
for (i = 0; i < dwords; i++, faddr++, dwptr++) {
- if (qla24xx_read_flash_dword(ha, faddr, dwptr))
+ ret = qla24xx_read_flash_dword(ha, faddr, dwptr);
+ if (ret != QLA_SUCCESS)
break;
cpu_to_le32s(dwptr);
}
- return dwptr;
+ return ret;
}
static int
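With qla24xx_read_flash_data() now returning an int instead of the advanced pointer, callers can distinguish a failed dword read from a clean pass. A sketch of the resulting caller pattern under the driver's QLA_SUCCESS convention; the buffer size and region name are illustrative:

    uint32_t buf[64];               /* illustrative dump buffer */
    int rc;

    rc = qla24xx_read_flash_data(vha, buf, region_start, ARRAY_SIZE(buf));
    if (rc != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0xffff,
                "flash read failed at 0x%x, rc=%d\n", region_start, rc);
            return rc;              /* propagate instead of parsing stale data */
    }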
@@ -680,8 +682,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_flt = flt_addr;
wptr = (uint16_t *)ha->flt;
- qla24xx_read_flash_data(vha, (void *)flt, flt_addr,
- (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE) >> 2);
+ ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2,
+ (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE));
if (le16_to_cpu(*wptr) == 0xffff)
goto no_flash_data;
@@ -948,11 +950,11 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
struct req_que *req = ha->req_q_map[0];
uint16_t cnt, chksum;
uint16_t *wptr = (void *)req->ring;
- struct qla_fdt_layout *fdt = (void *)req->ring;
+ struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring;
uint8_t man_id, flash_id;
uint16_t mid = 0, fid = 0;
- qla24xx_read_flash_data(vha, (void *)fdt, ha->flt_region_fdt,
+ ha->isp_ops->read_optrom(vha, fdt, ha->flt_region_fdt << 2,
OPTROM_BURST_DWORDS);
if (le16_to_cpu(*wptr) == 0xffff)
goto no_flash_data;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1c1f63be6eed..0ffda6171614 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -188,18 +188,17 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
- uint8_t *d_id)
+ be_id_t d_id)
{
struct scsi_qla_host *host;
- uint32_t key = 0;
+ uint32_t key;
- if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
- (vha->d_id.b.al_pa == d_id[2]))
+ if (vha->d_id.b.area == d_id.area &&
+ vha->d_id.b.domain == d_id.domain &&
+ vha->d_id.b.al_pa == d_id.al_pa)
return vha;
- key = (uint32_t)d_id[0] << 16;
- key |= (uint32_t)d_id[1] << 8;
- key |= (uint32_t)d_id[2];
+ key = be_to_port_id(d_id).b24;
host = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!host)
@@ -357,9 +356,9 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe03e,
"qla_target(%d): Received ATIO_TYPE7 "
"with unknown d_id %x:%x:%x\n", vha->vp_idx,
- atio->u.isp24.fcp_hdr.d_id[0],
- atio->u.isp24.fcp_hdr.d_id[1],
- atio->u.isp24.fcp_hdr.d_id[2]);
+ atio->u.isp24.fcp_hdr.d_id.domain,
+ atio->u.isp24.fcp_hdr.d_id.area,
+ atio->u.isp24.fcp_hdr.d_id.al_pa);
qlt_queue_unknown_atio(vha, atio, ha_locked);
@@ -560,10 +559,8 @@ static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
return qla2x00_post_work(vha, e);
}
-static
-void qla2x00_async_nack_sp_done(void *s, int res)
+static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
- struct srb *sp = (struct srb *)s;
struct scsi_qla_host *vha = sp->vha;
unsigned long flags;
@@ -789,6 +786,8 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
{
struct qlt_plogi_ack_t *pla;
+ lockdep_assert_held(&vha->hw->hardware_lock);
+
list_for_each_entry(pla, &vha->plogi_ack_list, list) {
if (pla->id.b24 == id->b24) {
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
@@ -1209,7 +1208,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
sess->logout_on_delete = 0;
sess->logo_ack_needed = 0;
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
- sess->scan_state = 0;
}
}
@@ -1284,13 +1282,12 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
/* At this point tgt could be already dead */
}
-static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
uint16_t *loop_id)
{
struct qla_hw_data *ha = vha->hw;
dma_addr_t gid_list_dma;
- struct gid_list_info *gid_list;
- char *id_iter;
+ struct gid_list_info *gid_list, *gid;
int res, rc, i;
uint16_t entries;
@@ -1313,19 +1310,17 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
goto out_free_id_list;
}
- id_iter = (char *)gid_list;
+ gid = gid_list;
res = -ENOENT;
for (i = 0; i < entries; i++) {
- struct gid_list_info *gid = (struct gid_list_info *)id_iter;
-
- if ((gid->al_pa == s_id[2]) &&
- (gid->area == s_id[1]) &&
- (gid->domain == s_id[0])) {
+ if (gid->al_pa == s_id.al_pa &&
+ gid->area == s_id.area &&
+ gid->domain == s_id.domain) {
*loop_id = le16_to_cpu(gid->loop_id);
res = 0;
break;
}
- id_iter += ha->gid_list_info_size;
+ gid = (void *)gid + ha->gid_list_info_size;
}
out_free_id_list:
@@ -1582,11 +1577,10 @@ static void qlt_release(struct qla_tgt *tgt)
struct qla_qpair_hint *h;
struct qla_hw_data *ha = vha->hw;
- if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
- !tgt->tgt_stopped)
+ if (!tgt->tgt_stop && !tgt->tgt_stopped)
qlt_stop_phase1(tgt);
- if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+ if (!tgt->tgt_stopped)
qlt_stop_phase2(tgt);
for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
@@ -1772,12 +1766,8 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
resp->fcp_hdr_le.f_ctl[1] = *p++;
resp->fcp_hdr_le.f_ctl[2] = *p;
- resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
- resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
- resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
- resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
- resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
- resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+ resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
+ resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
@@ -1848,19 +1838,11 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
resp->fcp_hdr_le.f_ctl[1] = *p++;
resp->fcp_hdr_le.f_ctl[2] = *p;
if (ids_reversed) {
- resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
- resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
- resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
- resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
- resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
- resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+ resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
+ resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
} else {
- resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
- resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
- resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
- resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
- resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
- resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+ resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
+ resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
}
resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
if (status == FCP_TMF_CMPL) {
@@ -1927,18 +1909,14 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
if (mcmd) {
- ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0];
- ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1];
- ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2];
+ ctio->initiator_id = entry->fcp_hdr_le.s_id;
if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
tmp |= (mcmd->abort_io_attr << 9);
else if (qpair->retry_term_cnt & 1)
tmp |= (0x4 << 9);
} else {
- ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
- ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
- ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+ ctio->initiator_id = entry->fcp_hdr_le.d_id;
if (qpair->retry_term_cnt & 1)
tmp |= (0x4 << 9);
@@ -1972,8 +1950,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
* XXX does not go through the list of other port (which may have cmds
* for the same lun)
*/
-static void abort_cmds_for_lun(struct scsi_qla_host *vha,
- u64 lun, uint8_t *s_id)
+static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
{
struct qla_tgt_sess_op *op;
struct qla_tgt_cmd *cmd;
@@ -2149,7 +2126,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
uint32_t tag = abts->exchange_addr_to_abort;
- uint8_t s_id[3];
+ be_id_t s_id;
int rc;
unsigned long flags;
@@ -2173,13 +2150,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
"qla_target(%d): task abort (s_id=%x:%x:%x, "
- "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
- abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+ "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
+ abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
le32_to_cpu(abts->fcp_hdr_le.parameter));
- s_id[0] = abts->fcp_hdr_le.s_id[2];
- s_id[1] = abts->fcp_hdr_le.s_id[1];
- s_id[2] = abts->fcp_hdr_le.s_id[0];
+ s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -2243,9 +2218,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
ctio->nport_handle = mcmd->sess->loop_id;
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = ha->vp_idx;
- ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
- ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
- ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
ctio->exchange_addr = atio->u.isp24.exchange_addr;
temp = (atio->u.isp24.attr << 9)|
CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
@@ -2302,9 +2275,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
ctio->nport_handle = cmd->sess->loop_id;
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
- ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
- ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
- ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
ctio->exchange_addr = atio->u.isp24.exchange_addr;
temp = (atio->u.isp24.attr << 9) |
CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
@@ -2605,9 +2576,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
- pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
- pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
- pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
pkt->exchange_addr = atio->u.isp24.exchange_addr;
temp = atio->u.isp24.attr << 9;
pkt->u.status0.flags |= cpu_to_le16(temp);
@@ -3120,9 +3089,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
- pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
- pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
- pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
pkt->exchange_addr = atio->u.isp24.exchange_addr;
/* silence compile warning */
@@ -3164,7 +3131,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
if (!bundling) {
- cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
+ cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
} else {
/*
* Configure Bundling if we need to fetch interleaving
@@ -3174,7 +3141,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
crc_ctx_pkt->u.bundling.dseg_count =
cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
- cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
+ cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
}
/* Finish the common fields of CRC pkt */
@@ -3239,7 +3206,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
cmd->state = QLA_TGT_STATE_PROCESSED;
- return 0;
+ res = 0;
+ goto free;
}
ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@@ -3250,9 +3218,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
- if (unlikely(res != 0)) {
- return res;
- }
+ if (unlikely(res != 0))
+ goto free;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
@@ -3272,7 +3239,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return 0;
+ res = 0;
+ goto free;
}
/* Does F/W have an IOCBs for this request */
@@ -3375,6 +3343,8 @@ out_unmap_unlock:
qlt_unmap_sg(vha, cmd);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+free:
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
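The qlt_xmit_response() rework above funnels every early exit through one free: label so the command is released exactly once, while the success path hands ownership to the hardware and skips the label. The idiom in isolation, with hypothetical names:

    static int xmit_response_sketch(struct my_cmd *cmd)
    {
            int res;

            if (link_is_down(cmd)) {        /* hypothetical early-out check */
                    res = 0;                /* treated as processed */
                    goto free;
            }

            res = build_and_send(cmd);      /* hypothetical send path */
            if (res != 0)
                    goto free;

            return 0;                       /* sent: completion frees it later */
    free:
            free_cmd(cmd);                  /* single release point */
            return res;
    }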
@@ -3672,9 +3642,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair,
ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
- ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
- ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
- ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
CTIO7_FLAGS_TERMINATE;
@@ -4107,8 +4075,6 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
return fcp_task_attr;
}
-static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
- uint8_t *);
/*
* Process context for I/O path into tcm_qla2xxx code
*/
@@ -4352,9 +4318,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENODEV;
}
- id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
- id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
- id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+ id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
if (IS_SW_RESV_ADDR(id))
return -EBUSY;
@@ -4716,6 +4680,8 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
struct qlt_plogi_ack_t *pla;
unsigned long flags;
+ lockdep_assert_held(&vha->hw->hardware_lock);
+
wwn = wwn_to_u64(iocb->u.isp24.port_name);
port_id.b.domain = iocb->u.isp24.port_id[2];
@@ -4799,8 +4765,10 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
__func__, sess->port_name, sec);
}
- if (!conflict_sess)
+ if (!conflict_sess) {
+ list_del(&pla->list);
kmem_cache_free(qla_tgt_plogi_cachep, pla);
+ }
qlt_send_term_imm_notif(vha, iocb, 1);
goto out;
@@ -4889,6 +4857,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
int res = 0;
unsigned long flags;
+ lockdep_assert_held(&ha->hardware_lock);
+
wwn = wwn_to_u64(iocb->u.isp24.port_name);
port_id.b.domain = iocb->u.isp24.port_id[2];
@@ -5165,6 +5135,8 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
int send_notify_ack = 1;
uint16_t status;
+ lockdep_assert_held(&ha->hardware_lock);
+
status = le16_to_cpu(iocb->u.isp2x.status);
switch (status) {
case IMM_NTFY_LIP_RESET:
@@ -5302,10 +5274,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
u16 temp;
port_id_t id;
- id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
- id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
- id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
- id.b.rsvd_1 = 0;
+ id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
@@ -5333,9 +5302,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair,
ctio24->nport_handle = sess->loop_id;
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
- ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
- ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
- ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
temp = (atio->u.isp24.attr << 9) |
CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
@@ -5767,7 +5734,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
entry->error_subcode2);
ha->tgt.tgt_ops->free_mcmd(mcmd);
}
- } else {
+ } else if (mcmd) {
ha->tgt.tgt_ops->free_mcmd(mcmd);
}
}
@@ -6121,21 +6088,21 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
/* Must be called under tgt_mutex */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
- uint8_t *s_id)
+ be_id_t s_id)
{
struct fc_port *sess = NULL;
fc_port_t *fcport = NULL;
int rc, global_resets;
uint16_t loop_id = 0;
- if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
+ if (s_id.domain == 0xFF && s_id.area == 0xFC) {
/*
* This is Domain Controller, so it should be
* OK to drop SCSI commands from it.
*/
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
"Unable to find initiator with S_ID %x:%x:%x",
- s_id[0], s_id[1], s_id[2]);
+ s_id.domain, s_id.area, s_id.al_pa);
return NULL;
}
@@ -6152,13 +6119,12 @@ retry:
ql_log(ql_log_info, vha, 0xf071,
"qla_target(%d): Unable to find "
"initiator with S_ID %x:%x:%x",
- vha->vp_idx, s_id[0], s_id[1],
- s_id[2]);
+ vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
if (rc == -ENOENT) {
qlt_port_logo_t logo;
- sid_to_portid(s_id, &logo.id);
+ logo.id = be_to_port_id(s_id);
logo.cmd_count = 1;
qlt_send_first_logo(vha, &logo);
}
@@ -6197,8 +6163,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess = NULL;
unsigned long flags = 0, flags2 = 0;
- uint32_t be_s_id;
- uint8_t s_id[3];
+ be_id_t s_id;
int rc;
spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
@@ -6206,12 +6171,9 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (tgt->tgt_stop)
goto out_term2;
- s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
- s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
- s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+ s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
- (unsigned char *)&be_s_id);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
if (!sess) {
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
@@ -6248,9 +6210,6 @@ static void qlt_abort_work(struct qla_tgt *tgt,
out_term2:
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
-
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
@@ -6266,7 +6225,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
struct qla_hw_data *ha = vha->hw;
struct fc_port *sess;
unsigned long flags;
- uint8_t *s_id = NULL; /* to hide compiler warnings */
+ be_id_t s_id;
int rc;
u64 unpacked_lun;
int fn;
@@ -6495,22 +6454,10 @@ void qlt_remove_target_resources(struct qla_hw_data *ha)
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
unsigned char *b)
{
- int i;
-
- pr_debug("qla2xxx HW vha->node_name: ");
- for (i = 0; i < WWN_SIZE; i++)
- pr_debug("%02x ", vha->node_name[i]);
- pr_debug("\n");
- pr_debug("qla2xxx HW vha->port_name: ");
- for (i = 0; i < WWN_SIZE; i++)
- pr_debug("%02x ", vha->port_name[i]);
- pr_debug("\n");
-
- pr_debug("qla2xxx passed configfs WWPN: ");
+ pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
+ pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
put_unaligned_be64(wwpn, b);
- for (i = 0; i < WWN_SIZE; i++)
- pr_debug("%02x ", b[i]);
- pr_debug("\n");
+ pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}
/**
@@ -6671,6 +6618,8 @@ qlt_enable_vha(struct scsi_qla_host *vha)
if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
return;
+ if (ha->tgt.num_act_qpairs > ha->max_qpairs)
+ ha->tgt.num_act_qpairs = ha->max_qpairs;
spin_lock_irqsave(&ha->hardware_lock, flags);
tgt->tgt_stopped = 0;
qlt_set_mode(vha);
@@ -6685,7 +6634,8 @@ qlt_enable_vha(struct scsi_qla_host *vha)
} else {
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
- qla2x00_wait_for_hba_online(base_vha);
+ WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
+ QLA_SUCCESS);
}
mutex_unlock(&ha->optrom_mutex);
}
@@ -6716,7 +6666,9 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- qla2x00_wait_for_hba_online(vha);
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_tgt, vha, 0xe081,
+ "qla2x00_wait_for_hba_online() failed\n");
}
/*
@@ -6815,7 +6767,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
*/
ql_log(ql_log_warn, vha, 0xd03c,
"corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
- pkt->u.isp24.fcp_hdr.s_id,
+ &pkt->u.isp24.fcp_hdr.s_id,
be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index b8d244f1e189..d006f0a97b8c 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -247,9 +247,9 @@ struct ctio_to_2xxx {
struct fcp_hdr {
uint8_t r_ctl;
- uint8_t d_id[3];
+ be_id_t d_id;
uint8_t cs_ctl;
- uint8_t s_id[3];
+ be_id_t s_id;
uint8_t type;
uint8_t f_ctl[3];
uint8_t seq_id;
@@ -261,9 +261,9 @@ struct fcp_hdr {
} __packed;
struct fcp_hdr_le {
- uint8_t d_id[3];
+ le_id_t d_id;
uint8_t r_ctl;
- uint8_t s_id[3];
+ le_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
@@ -402,7 +402,7 @@ struct ctio7_to_24xx {
uint16_t dseg_count; /* Data segment count. */
uint8_t vp_index;
uint8_t add_flags;
- uint8_t initiator_id[3];
+ le_id_t initiator_id;
uint8_t reserved;
uint32_t exchange_addr;
union {
@@ -498,7 +498,7 @@ struct ctio_crc2_to_fw {
uint8_t add_flags; /* additional flags */
#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
- uint8_t initiator_id[3]; /* initiator ID */
+ le_id_t initiator_id; /* initiator ID */
uint8_t reserved1;
uint32_t exchange_addr; /* rcv exchange address */
uint16_t reserved2;
@@ -682,7 +682,7 @@ struct qla_tgt_func_tmpl {
struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *,
const uint16_t);
struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *,
- const uint8_t *);
+ const be_id_t);
void (*clear_nacl_from_fcport_map)(struct fc_port *);
void (*put_sess)(struct fc_port *);
void (*shutdown_sess)(struct fc_port *);
@@ -912,7 +912,7 @@ struct qla_tgt_cmd {
uint8_t scsi_status, sense_key, asc, ascq;
struct crc_context *ctx;
- uint8_t *cdb;
+ const uint8_t *cdb;
uint64_t lba;
uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
uint32_t a_ref_tag, e_ref_tag;
@@ -1030,22 +1030,11 @@ static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha)
return (ha->host->active_mode == MODE_DUAL);
}
-static inline uint32_t sid_to_key(const uint8_t *s_id)
+static inline uint32_t sid_to_key(const be_id_t s_id)
{
- uint32_t key;
-
- key = (((unsigned long)s_id[0] << 16) |
- ((unsigned long)s_id[1] << 8) |
- (unsigned long)s_id[2]);
- return key;
-}
-
-static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
-{
- memset(p, 0, sizeof(*p));
- p->b.domain = s_id[0];
- p->b.area = s_id[1];
- p->b.al_pa = s_id[2];
+ return s_id.domain << 16 |
+ s_id.area << 8 |
+ s_id.al_pa;
}
/*
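The sid_to_key() rewrite above builds the 24-bit Fibre Channel port ID key from named domain/area/al_pa fields instead of indexing a raw byte array. Assuming be_id_t holds the three bytes in wire (big-endian) order, domain 0x01, area 0x02, al_pa 0x03 packs to 0x010203. A self-contained sketch; the struct layout here is an assumption, the real definition lives in the qla2xxx headers:

    #include <stdint.h>

    /* Assumed layout: ID bytes in big-endian (wire) order. */
    typedef struct {
            uint8_t domain;
            uint8_t area;
            uint8_t al_pa;
    } be_id_t;

    static inline uint32_t sid_to_key(const be_id_t s_id)
    {
            return s_id.domain << 16 | s_id.area << 8 | s_id.al_pa;
    }

    /* sid_to_key((be_id_t){ 0x01, 0x02, 0x03 }) == 0x010203 */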
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index de696a07532e..294d77c02cdf 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -429,7 +429,7 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd20a,
"%s: reset risc [%lx]\n", __func__, *len);
if (buf)
- qla24xx_soft_reset(vha->hw);
+ WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
return qla27xx_next_entry(ent);
}
@@ -860,8 +860,9 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
- sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
- v+0, v+1, v+2, v+3, v+4, v+5);
+ WARN_ON_ONCE(sscanf(qla2x00_version_str,
+ "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+ v+0, v+1, v+2, v+3, v+4, v+5) != 6);
tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
tmp->driver_info[1] = v[5] << 8 | v[4];
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cd6bdf71e533..a8f2a953ceff 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.16-k"
+#define QLA2XXX_VERSION "10.01.00.19-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 1
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d15412d3d9bd..042a24314edc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -620,6 +620,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+ struct scsi_qla_host *vha = cmd->vha;
if (cmd->aborted) {
/* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
@@ -632,6 +633,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
cmd->se_cmd.transport_state,
cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
return 0;
}
@@ -659,6 +661,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+ struct scsi_qla_host *vha = cmd->vha;
int xmit_type = QLA_TGT_XMIT_STATUS;
if (cmd->aborted) {
@@ -672,6 +675,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd, kref_read(&cmd->se_cmd.cmd_kref),
cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
cmd->se_cmd.se_cmd_flags);
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
return 0;
}
cmd->bufflen = se_cmd->data_length;
@@ -1136,9 +1140,8 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn,
/*
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
-static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(
- scsi_qla_host_t *vha,
- const uint8_t *s_id)
+static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha,
+ const be_id_t s_id)
{
struct tcm_qla2xxx_lport *lport;
struct se_node_acl *se_nacl;
@@ -1181,7 +1184,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
struct tcm_qla2xxx_nacl *nacl,
struct se_session *se_sess,
struct fc_port *fc_port,
- uint8_t *s_id)
+ be_id_t s_id)
{
u32 key;
void *slot;
@@ -1348,14 +1351,9 @@ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess)
{
struct se_session *se_sess = sess->se_sess;
- unsigned char be_sid[3];
-
- be_sid[0] = sess->d_id.b.domain;
- be_sid[1] = sess->d_id.b.area;
- be_sid[2] = sess->d_id.b.al_pa;
tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
- sess, be_sid);
+ sess, port_id_to_be_id(sess->d_id));
tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
sess, sess->loop_id);
}
@@ -1401,19 +1399,14 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
struct fc_port *qlat_sess = p;
uint16_t loop_id = qlat_sess->loop_id;
unsigned long flags;
- unsigned char be_sid[3];
-
- be_sid[0] = qlat_sess->d_id.b.domain;
- be_sid[1] = qlat_sess->d_id.b.area;
- be_sid[2] = qlat_sess->d_id.b.al_pa;
/*
* And now setup se_nacl and session pointers into HW lport internal
* mappings for fabric S_ID and LOOP_ID.
*/
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl,
- se_sess, qlat_sess, be_sid);
+ tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qlat_sess,
+ port_id_to_be_id(qlat_sess->d_id));
tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
se_sess, qlat_sess, loop_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 9335849f6bea..d539beef3ce8 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -200,10 +200,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
/* Write mailbox command registers. */
switch (mbox_param[param[0]] >> 4) {
case 6: sbus_writew(param[5], qpti->qregs + MBOX5);
+ /* Fall through */
case 5: sbus_writew(param[4], qpti->qregs + MBOX4);
+ /* Fall through */
case 4: sbus_writew(param[3], qpti->qregs + MBOX3);
+ /* Fall through */
case 3: sbus_writew(param[2], qpti->qregs + MBOX2);
+ /* Fall through */
case 2: sbus_writew(param[1], qpti->qregs + MBOX1);
+ /* Fall through */
case 1: sbus_writew(param[0], qpti->qregs + MBOX0);
}
@@ -254,10 +259,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f
/* Read back output parameters. */
switch (mbox_param[param[0]] & 0xf) {
case 6: param[5] = sbus_readw(qpti->qregs + MBOX5);
+ /* Fall through */
case 5: param[4] = sbus_readw(qpti->qregs + MBOX4);
+ /* Fall through */
case 4: param[3] = sbus_readw(qpti->qregs + MBOX3);
+ /* Fall through */
case 3: param[2] = sbus_readw(qpti->qregs + MBOX2);
+ /* Fall through */
case 2: param[1] = sbus_readw(qpti->qregs + MBOX1);
+ /* Fall through */
case 1: param[0] = sbus_readw(qpti->qregs + MBOX0);
}
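Both qlogicpti switches rely on deliberate fall-through: a command taking N mailbox parameters enters at case N and falls through every lower case, touching MBOX(N-1) down to MBOX0, and the new /* Fall through */ comments document that intent for -Wimplicit-fallthrough. The cascade shape as a standalone, runnable sketch:

    #include <stdio.h>

    /* Emit the first n of up to six parameters, highest register first. */
    static void write_params(const unsigned short *param, int n)
    {
            switch (n) {
            case 6: printf("MBOX5 <- %hu\n", param[5]);
                    /* Fall through */
            case 5: printf("MBOX4 <- %hu\n", param[4]);
                    /* Fall through */
            case 4: printf("MBOX3 <- %hu\n", param[3]);
                    /* Fall through */
            case 3: printf("MBOX2 <- %hu\n", param[2]);
                    /* Fall through */
            case 2: printf("MBOX1 <- %hu\n", param[1]);
                    /* Fall through */
            case 1: printf("MBOX0 <- %hu\n", param[0]);
            }
    }

    int main(void)
    {
            unsigned short p[6] = { 10, 11, 12, 13, 14, 15 };

            write_params(p, 3);     /* prints MBOX2, MBOX1, MBOX0 */
            return 0;
    }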
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index c5a8756384bc..c19ea7ab54cb 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/bitops.h>
#include <linux/seq_file.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
@@ -18,9 +19,7 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
bool sep = false;
int i;
- for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
- if (!(flags & BIT(i)))
- continue;
+ for_each_set_bit(i, &flags, BITS_PER_LONG) {
if (sep)
seq_puts(m, "|");
sep = true;
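for_each_set_bit() above replaces the open-coded mask test and visits only bits that are actually set. A hedged sketch of the surrounding flag-name printing; the iterator and seq_file calls are the real kernel API, the flag table and input value are illustrative:

    static const char *const flag_name[] = { "TAGGED", "INITIALIZED", "LAST" };
    unsigned long flags = BIT(0) | BIT(2);  /* illustrative input */
    bool sep = false;
    int i;

    for_each_set_bit(i, &flags, BITS_PER_LONG) {
            if (sep)
                    seq_puts(m, "|");
            sep = true;
            if (i < ARRAY_SIZE(flag_name))
                    seq_puts(m, flag_name[i]);
            else
                    seq_printf(m, "%d", i);
    }
    /* emits: TAGGED|LAST */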
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4e88d7e9cf9a..dc210b9d4896 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1678,10 +1678,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(req);
}
+ cmd->flags &= SCMD_PRESERVED_FLAGS;
if (sdev->simple_tags)
cmd->flags |= SCMD_TAGGED;
- else
- cmd->flags &= ~SCMD_TAGGED;
+ if (bd->last)
+ cmd->flags |= SCMD_LAST;
scsi_init_cmd_errh(cmd);
cmd->scsi_done = scsi_mq_done;
@@ -1821,10 +1822,37 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);
+static const struct blk_mq_ops scsi_mq_ops_no_commit = {
+ .get_budget = scsi_mq_get_budget,
+ .put_budget = scsi_mq_put_budget,
+ .queue_rq = scsi_queue_rq,
+ .complete = scsi_softirq_done,
+ .timeout = scsi_timeout,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .show_rq = scsi_show_rq,
+#endif
+ .init_request = scsi_mq_init_request,
+ .exit_request = scsi_mq_exit_request,
+ .initialize_rq_fn = scsi_initialize_rq,
+ .busy = scsi_mq_lld_busy,
+ .map_queues = scsi_map_queues,
+};
+
+
+static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+
+ shost->hostt->commit_rqs(shost, hctx->queue_num);
+}
+
static const struct blk_mq_ops scsi_mq_ops = {
.get_budget = scsi_mq_get_budget,
.put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
+ .commit_rqs = scsi_commit_rqs,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
@@ -1861,7 +1889,10 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
memset(&shost->tag_set, 0, sizeof(shost->tag_set));
- shost->tag_set.ops = &scsi_mq_ops;
+ if (shost->hostt->commit_rqs)
+ shost->tag_set.ops = &scsi_mq_ops;
+ else
+ shost->tag_set.ops = &scsi_mq_ops_no_commit;
shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
shost->tag_set.queue_depth = shost->can_queue;
shost->tag_set.cmd_size = cmd_size;
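Keeping two ops tables means .commit_rqs is only advertised when the host template implements it, so blk-mq never issues empty doorbell calls to hosts that cannot batch. A hedged sketch of the LLD side, pairing SCMD_LAST-aware dispatch with a commit_rqs fallback; every my_* name is illustrative:

    /* Stage requests and defer the doorbell while a batch is still filling. */
    static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
            struct my_hw *hw = shost_priv(shost);

            my_post_to_ring(hw, cmd);               /* stage the request */
            if (cmd->flags & SCMD_LAST)             /* batch ends here */
                    my_ring_doorbell(hw);
            return 0;
    }

    /* blk-mq calls this when a batch ended without a SCMD_LAST dispatch. */
    static void my_commit_rqs(struct Scsi_Host *shost, u16 hwq)
    {
            my_ring_doorbell(shost_priv(shost));
    }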
@@ -2691,6 +2722,14 @@ void scsi_start_queue(struct scsi_device *sdev)
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
enum scsi_device_state new_state)
{
+ switch (new_state) {
+ case SDEV_RUNNING:
+ case SDEV_TRANSPORT_OFFLINE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/*
* Try to transition the scsi device to SDEV_RUNNING or one of the
* offlined states and goose the device queue if successful.
@@ -2748,7 +2787,12 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev,
static void
device_block(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_block(sdev);
+ int ret;
+
+ ret = scsi_internal_device_block(sdev);
+
+ WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
+ dev_name(&sdev->sdev_gendev), ret);
}
static int
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index 39b8cc4574b4..c6ed0b12e807 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -15,57 +15,15 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
-#define SCSI_LOG_SPOOLSIZE 4096
-
-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
-#warning SCSI logging bitmask too large
-#endif
-
-struct scsi_log_buf {
- char buffer[SCSI_LOG_SPOOLSIZE];
- unsigned long map;
-};
-
-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
-
static char *scsi_log_reserve_buffer(size_t *len)
{
- struct scsi_log_buf *buf;
- unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
- unsigned long idx = 0;
-
- preempt_disable();
- buf = this_cpu_ptr(&scsi_format_log);
- idx = find_first_zero_bit(&buf->map, map_bits);
- if (likely(idx < map_bits)) {
- while (test_and_set_bit(idx, &buf->map)) {
- idx = find_next_zero_bit(&buf->map, map_bits, idx);
- if (idx >= map_bits)
- break;
- }
- }
- if (WARN_ON(idx >= map_bits)) {
- preempt_enable();
- return NULL;
- }
- *len = SCSI_LOG_BUFSIZE;
- return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+ *len = 128;
+ return kmalloc(*len, GFP_ATOMIC);
}
static void scsi_log_release_buffer(char *bufptr)
{
- struct scsi_log_buf *buf;
- unsigned long idx;
- int ret;
-
- buf = this_cpu_ptr(&scsi_format_log);
- if (bufptr >= buf->buffer &&
- bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
- idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
- ret = test_and_clear_bit(idx, &buf->map);
- WARN_ON(!ret);
- }
- preempt_enable();
+ kfree(bufptr);
}
static inline const char *scmd_name(const struct scsi_cmnd *scmd)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4b925552458f..7623196de9e3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1981,6 +1981,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
sd_printk(KERN_INFO, sdkp,
"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
resid, sector_size);
+ scsi_print_command(SCpnt);
resid = min(scsi_bufflen(SCpnt),
round_up(resid, sector_size));
scsi_set_resid(SCpnt, resid);
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 97e159c2cecd..bc6506884e3b 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,6 +1,8 @@
#
# Kernel configuration file for the SMARTPQI
#
+# Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
# (mailto:esc.storagedev@microsemi.com)
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e8e768849c70..79d2af36f655 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -822,6 +822,7 @@ union pqi_reset_register {
#define PQI_HBA_BUS 2
#define PQI_EXTERNAL_RAID_VOLUME_BUS 3
#define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS
+#define PQI_VSEP_CISS_BTL 379
struct report_lun_header {
__be32 list_length;
@@ -930,6 +931,9 @@ struct pqi_scsi_dev {
u8 active_path_index;
u8 path_map;
u8 bay;
+ u8 box_index;
+ u8 phys_box_on_bus;
+ u8 phy_connected_dev_type;
u8 box[8];
u16 phys_connector[8];
bool raid_bypass_configured; /* RAID bypass configured */
@@ -1073,6 +1077,9 @@ struct pqi_ctrl_info {
unsigned int ctrl_id;
struct pci_dev *pci_dev;
char firmware_version[11];
+ char serial_number[17];
+ char model[17];
+ char vendor[9];
void __iomem *iomem_base;
struct pqi_ctrl_registers __iomem *registers;
struct pqi_device_registers __iomem *pqi_registers;
@@ -1224,12 +1231,21 @@ struct bmic_identify_controller {
__le16 extended_logical_unit_count;
u8 reserved1[34];
__le16 firmware_build_number;
- u8 reserved2[100];
+ u8 reserved2[8];
+ u8 vendor_id[8];
+ u8 product_id[16];
+ u8 reserved3[68];
u8 controller_mode;
- u8 reserved3[32];
+ u8 reserved4[32];
+};
+
+struct bmic_sense_subsystem_info {
+ u8 reserved[44];
+ u8 ctrl_serial_number[16];
};
#define SA_EXPANDER_SMP_DEVICE 0x05
+#define SA_CONTROLLER_DEVICE 0x07
/*SCSI Invalid Device Type for SAS devices*/
#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8fd5ffc55792..ea5409bebf57 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.6-015"
+#define DRIVER_VERSION "1.2.8-026"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 6
-#define DRIVER_REVISION 15
+#define DRIVER_RELEASE 8
+#define DRIVER_REVISION 26
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -145,6 +145,18 @@ MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
"\t\tSupported: none, reboot, panic\n"
"\t\tDefault: none");
+static int pqi_expose_ld_first;
+module_param_named(expose_ld_first,
+ pqi_expose_ld_first, int, 0644);
+MODULE_PARM_DESC(expose_ld_first,
+ "Expose logical drives before physical drives.");
+
+static int pqi_hide_vsep;
+module_param_named(hide_vsep,
+ pqi_hide_vsep, int, 0644);
+MODULE_PARM_DESC(hide_vsep,
+ "Hide the virtual SEP for direct attached drives.");
+
static char *raid_levels[] = {
"RAID-0",
"RAID-4",
@@ -472,6 +484,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
/* fall through */
case BMIC_IDENTIFY_CONTROLLER:
case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+ case BMIC_SENSE_SUBSYSTEM_INFORMATION:
request->data_direction = SOP_READ_FLAG;
cdb[0] = BMIC_READ;
cdb[6] = cmd;
@@ -600,6 +613,14 @@ static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
buffer, sizeof(*buffer));
}
+static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
+ struct bmic_sense_subsystem_info *sense_info)
+{
+ return pqi_send_ctrl_raid_request(ctrl_info,
+ BMIC_SENSE_SUBSYSTEM_INFORMATION,
+ sense_info, sizeof(*sense_info));
+}
+
static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
@@ -1392,7 +1413,9 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
return;
}
-
+ device->box_index = id_phys->box_index;
+ device->phys_box_on_bus = id_phys->phys_box_on_bus;
+ device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
device->queue_depth =
get_unaligned_le16(&id_phys->current_queue_depth_limit);
device->device_type = id_phys->device_type;
@@ -1719,6 +1742,10 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
existing_device->active_path_index = new_device->active_path_index;
existing_device->path_map = new_device->path_map;
existing_device->bay = new_device->bay;
+ existing_device->box_index = new_device->box_index;
+ existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+ existing_device->phy_connected_dev_type =
+ new_device->phy_connected_dev_type;
memcpy(existing_device->box, new_device->box,
sizeof(existing_device->box));
memcpy(existing_device->phys_connector, new_device->phys_connector,
@@ -1945,6 +1972,11 @@ static inline bool pqi_skip_device(u8 *scsi3addr)
return false;
}
+static inline void pqi_mask_device(u8 *scsi3addr)
+{
+ scsi3addr[3] |= 0xc0;
+}
+
static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
if (!device->is_physical_device)
@@ -1988,6 +2020,8 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
unsigned int num_valid_devices;
bool is_physical_device;
u8 *scsi3addr;
+ unsigned int physical_index;
+ unsigned int logical_index;
static char *out_of_memory_msg =
"failed to allocate memory, device discovery stopped";
@@ -2023,6 +2057,21 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
rc = -ENOMEM;
goto out;
}
+ if (pqi_hide_vsep) {
+ int i;
+
+ for (i = num_physicals - 1; i >= 0; i--) {
+ phys_lun_ext_entry =
+ &physdev_list->lun_entries[i];
+ if (CISS_GET_DRIVE_NUMBER(
+ phys_lun_ext_entry->lunid) ==
+ PQI_VSEP_CISS_BTL) {
+ pqi_mask_device(
+ phys_lun_ext_entry->lunid);
+ break;
+ }
+ }
+ }
}
num_new_devices = num_physicals + num_logicals;
@@ -2050,19 +2099,23 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device = NULL;
num_valid_devices = 0;
+ physical_index = 0;
+ logical_index = 0;
for (i = 0; i < num_new_devices; i++) {
- if (i < num_physicals) {
+ if ((!pqi_expose_ld_first && i < num_physicals) ||
+ (pqi_expose_ld_first && i >= num_logicals)) {
is_physical_device = true;
- phys_lun_ext_entry = &physdev_list->lun_entries[i];
+ phys_lun_ext_entry =
+ &physdev_list->lun_entries[physical_index++];
log_lun_ext_entry = NULL;
scsi3addr = phys_lun_ext_entry->lunid;
} else {
is_physical_device = false;
phys_lun_ext_entry = NULL;
log_lun_ext_entry =
- &logdev_list->lun_entries[i - num_physicals];
+ &logdev_list->lun_entries[logical_index++];
scsi3addr = log_lun_ext_entry->lunid;
}
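The loop above still makes one pass over num_physicals + num_logicals slots; expose_ld_first only flips which list feeds the early slots, while physical_index and logical_index advance independently. The selection math, reduced to runnable C (only the predicate and counters come from the hunk):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int num_physicals = 2, num_logicals = 3;
            unsigned int physical_index = 0, logical_index = 0;
            bool expose_ld_first = true;
            unsigned int i, total = num_physicals + num_logicals;

            for (i = 0; i < total; i++) {
                    bool is_physical =
                            (!expose_ld_first && i < num_physicals) ||
                            (expose_ld_first && i >= num_logicals);

                    if (is_physical)
                            printf("slot %u: physical[%u]\n", i, physical_index++);
                    else
                            printf("slot %u: logical[%u]\n", i, logical_index++);
            }
            return 0;       /* logical[0..2] first, then physical[0..1] */
    }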
@@ -2122,11 +2175,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device->aio_handle =
phys_lun_ext_entry->aio_handle;
}
- if (device->devtype == TYPE_DISK ||
- device->devtype == TYPE_ZBC) {
+
pqi_get_physical_disk_info(ctrl_info,
device, id_phys);
- }
+
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
sizeof(device->volume_id));
@@ -2184,18 +2236,20 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
- int rc;
+ int rc = 0;
if (pqi_ctrl_offline(ctrl_info))
return -ENXIO;
- mutex_lock(&ctrl_info->scan_mutex);
-
- rc = pqi_update_scsi_devices(ctrl_info);
- if (rc)
+ if (!mutex_trylock(&ctrl_info->scan_mutex)) {
pqi_schedule_rescan_worker_delayed(ctrl_info);
-
- mutex_unlock(&ctrl_info->scan_mutex);
+ rc = -EINPROGRESS;
+ } else {
+ rc = pqi_update_scsi_devices(ctrl_info);
+ if (rc)
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
+ mutex_unlock(&ctrl_info->scan_mutex);
+ }
return rc;
}
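pqi_scan_scsi_devices() now takes the scan mutex with mutex_trylock(), so a rescan that arrives while another scan runs is deferred to the worker instead of blocking the caller. The idiom sketched with stand-in names; mutex_trylock() and -EINPROGRESS are real, the ctrl type and helpers are illustrative:

    static int scan_devices_sketch(struct my_ctrl *ci)
    {
            int rc;

            if (!mutex_trylock(&ci->scan_mutex)) {
                    schedule_rescan_delayed(ci);    /* another scan is running */
                    return -EINPROGRESS;            /* worker retries later */
            }

            rc = update_device_list(ci);            /* the actual scan */
            if (rc)
                    schedule_rescan_delayed(ci);    /* transient failure: retry */
            mutex_unlock(&ci->scan_mutex);

            return rc;
    }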
@@ -6091,23 +6145,65 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
return rc;
}
-static ssize_t pqi_version_show(struct device *dev,
+static ssize_t pqi_firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+
+ shost = class_to_shost(dev);
+ ctrl_info = shost_to_hba(shost);
+
+ return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
+}
+
+static ssize_t pqi_driver_version_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+
+ shost = class_to_shost(dev);
+ ctrl_info = shost_to_hba(shost);
+
+ return snprintf(buffer, PAGE_SIZE,
+ "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+}
+
+static ssize_t pqi_serial_number_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- ssize_t count = 0;
struct Scsi_Host *shost;
struct pqi_ctrl_info *ctrl_info;
shost = class_to_shost(dev);
ctrl_info = shost_to_hba(shost);
- count += snprintf(buffer + count, PAGE_SIZE - count,
- " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+ return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
+}
+
+static ssize_t pqi_model_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
- count += snprintf(buffer + count, PAGE_SIZE - count,
- "firmware: %s\n", ctrl_info->firmware_version);
+ shost = class_to_shost(dev);
+ ctrl_info = shost_to_hba(shost);
- return count;
+ return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
+}
+
+static ssize_t pqi_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+
+ shost = class_to_shost(dev);
+ ctrl_info = shost_to_hba(shost);
+
+ return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
}
static ssize_t pqi_host_rescan_store(struct device *dev,
@@ -6160,13 +6256,21 @@ static ssize_t pqi_lockup_action_store(struct device *dev,
return -EINVAL;
}
-static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
+static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
+static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
+static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
+static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
+static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
static DEVICE_ATTR(lockup_action, 0644,
pqi_lockup_action_show, pqi_lockup_action_store);
static struct device_attribute *pqi_shost_attrs[] = {
- &dev_attr_version,
+ &dev_attr_driver_version,
+ &dev_attr_firmware_version,
+ &dev_attr_model,
+ &dev_attr_serial_number,
+ &dev_attr_vendor,
&dev_attr_rescan,
&dev_attr_lockup_action,
NULL
@@ -6558,7 +6662,30 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
return rc;
}
-static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
+static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct bmic_sense_subsystem_info *sense_info;
+
+ sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
+ if (!sense_info)
+ return -ENOMEM;
+
+ rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
+ if (rc)
+ goto out;
+
+ memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
+ sizeof(sense_info->ctrl_serial_number));
+ ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
+
+out:
+ kfree(sense_info);
+
+ return rc;
+}
+
+static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
{
int rc;
struct bmic_identify_controller *identify;
@@ -6579,6 +6706,14 @@ static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
sizeof(ctrl_info->firmware_version),
"-%u", get_unaligned_le16(&identify->firmware_build_number));
+ memcpy(ctrl_info->model, identify->product_id,
+ sizeof(identify->product_id));
+ ctrl_info->model[sizeof(identify->product_id)] = '\0';
+
+ memcpy(ctrl_info->vendor, identify->vendor_id,
+ sizeof(identify->vendor_id));
+ ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+
out:
kfree(identify);
@@ -7098,10 +7233,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- rc = pqi_get_ctrl_firmware_version(ctrl_info);
+ rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error obtaining firmware version\n");
+ "error obtaining product details\n");
+ return rc;
+ }
+
+ rc = pqi_get_ctrl_serial_number(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error obtaining ctrl serial number\n");
return rc;
}
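The BMIC identify and sense buffers above expose fixed-width byte fields with no guaranteed NUL terminator, which is why each string in pqi_ctrl_info is declared one byte larger than its source field and terminated explicitly after memcpy(). The pattern in isolation; the sizes mirror the hunk, the struct is trimmed:

    #include <stddef.h>
    #include <string.h>

    struct ctrl_strings {
            char serial_number[16 + 1];     /* source field: 16 raw bytes */
            char vendor[8 + 1];             /* source field: 8 raw bytes */
    };

    /* dst must have room for n + 1 bytes. */
    static void copy_fw_field(char *dst, const unsigned char *src, size_t n)
    {
            memcpy(dst, src, n);            /* raw bytes, maybe unterminated */
            dst[n] = '\0';                  /* make it a C string */
    }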
@@ -7241,10 +7383,10 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- rc = pqi_get_ctrl_firmware_version(ctrl_info);
+ rc = pqi_get_ctrl_product_details(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error obtaining firmware version\n");
+			"error obtaining product details\n");
return rc;
}
@@ -8024,6 +8166,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x004f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -8088,6 +8234,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0808)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0809)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0900)
},
{
@@ -8244,6 +8398,26 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1d8d, 0x0800)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1d8d, 0x0908)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1d8d, 0x0806)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1d8d, 0x0916)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_GIGABYTE, 0x1000)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 5cca1b9ef1f1..6776dfc1d317 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -312,12 +312,110 @@ static int pqi_sas_get_linkerrors(struct sas_phy *phy)
static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
u64 *identifier)
{
- return 0;
+
+ int rc;
+ unsigned long flags;
+ struct Scsi_Host *shost;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *found_device;
+ struct pqi_scsi_dev *device;
+
+ if (!rphy)
+ return -ENODEV;
+
+ shost = rphy_to_shost(rphy);
+ ctrl_info = shost_to_hba(shost);
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ found_device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
+
+ if (!found_device) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ if (found_device->devtype == TYPE_ENCLOSURE) {
+ *identifier = get_unaligned_be64(&found_device->wwid);
+ rc = 0;
+ goto out;
+ }
+
+ if (found_device->box_index == 0xff ||
+ found_device->phys_box_on_bus == 0 ||
+ found_device->bay == 0xff) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (device->devtype == TYPE_ENCLOSURE &&
+ device->box_index == found_device->box_index &&
+ device->phys_box_on_bus ==
+ found_device->phys_box_on_bus &&
+ memcmp(device->phys_connector,
+ found_device->phys_connector, 2) == 0) {
+ *identifier =
+ get_unaligned_be64(&device->wwid);
+ rc = 0;
+ goto out;
+ }
+ }
+
+ if (found_device->phy_connected_dev_type != SA_CONTROLLER_DEVICE) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (device->devtype == TYPE_ENCLOSURE &&
+ CISS_GET_DRIVE_NUMBER(device->scsi3addr) ==
+ PQI_VSEP_CISS_BTL) {
+ *identifier = get_unaligned_be64(&device->wwid);
+ rc = 0;
+ goto out;
+ }
+ }
+
+ rc = -EINVAL;
+out:
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return rc;
+
}
static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
{
- return -ENXIO;
+
+ int rc;
+ unsigned long flags;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ struct Scsi_Host *shost;
+
+ if (!rphy)
+ return -ENODEV;
+
+ shost = rphy_to_shost(rphy);
+ ctrl_info = shost_to_hba(shost);
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
+
+ if (!device) {
+ rc = -ENODEV;
+ goto out;
+ }
+
+ if (device->bay == 0xff)
+ rc = -EINVAL;
+ else
+ rc = device->bay;
+
+out:
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return rc;
}
static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset)
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3d80ab67a626..955e4c938d49 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -397,10 +397,12 @@ static int sun3scsi_dma_finish(int write_flag)
case CSR_LEFT_3:
*vaddr = (dregs->bpack_lo & 0xff00) >> 8;
vaddr--;
+ /* Fall through */
case CSR_LEFT_2:
*vaddr = (dregs->bpack_hi & 0x00ff);
vaddr--;
+ /* Fall through */
case CSR_LEFT_1:
*vaddr = (dregs->bpack_hi & 0xff00) >> 8;
diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c
index dd3f07b31612..9dc17f1288f9 100644
--- a/drivers/scsi/sym53c8xx_2/sym_nvram.c
+++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c
@@ -648,7 +648,7 @@ static int sym_read_T93C46_nvram(struct sym_device *np, Tekram_nvram *nvram)
{
u_char gpcntl, gpreg;
u_char old_gpcntl, old_gpreg;
- int retv = 1;
+ int retv;
/* save current state of GPCNTL and GPREG */
old_gpreg = INB(np, nc_gpreg);
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
index 86dbb723f3ac..b2af04c57a39 100644
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ b/drivers/scsi/ufs/cdns-pltfrm.c
@@ -62,23 +62,47 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
}
/**
- * Sets clocks used by the controller
+ * cdns_ufs_hce_enable_notify - called before and after HCE enable bit is set
* @hba: host controller instance
- * @on: if true, enable clocks, otherwise disable
* @status: notify stage (pre, post change)
*
* Return zero for success and non-zero for failure
*/
-static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on,
- enum ufs_notify_change_status status)
+static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
{
- if ((!on) || (status == PRE_CHANGE))
+ if (status != PRE_CHANGE)
return 0;
return cdns_ufs_set_hclkdiv(hba);
}
/**
+ * cdns_ufs_link_startup_notify - called before and after link startup
+ * @hba: host controller instance
+ * @status: notify stage (pre, post change)
+ *
+ * Return zero for success and non-zero for failure
+ */
+static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ if (status != PRE_CHANGE)
+ return 0;
+
+ /*
+ * Some UFS devices have issues if LCC is enabled.
+ * So we are setting PA_Local_TX_LCC_Enable to 0
+ * before link startup which will make sure that both host
+ * and device TX LCC are disabled once link startup is
+ * completed.
+ */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
+
+ return 0;
+}
+
+/**
* cdns_ufs_init - performs additional ufs initialization
* @hba: host controller instance
*
@@ -114,13 +138,15 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
.name = "cdns-ufs-pltfm",
- .setup_clocks = cdns_ufs_setup_clocks,
+ .hce_enable_notify = cdns_ufs_hce_enable_notify,
+ .link_startup_notify = cdns_ufs_link_startup_notify,
};
static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
.name = "cdns-ufs-pltfm",
.init = cdns_ufs_init,
- .setup_clocks = cdns_ufs_setup_clocks,
+ .hce_enable_notify = cdns_ufs_hce_enable_notify,
+ .link_startup_notify = cdns_ufs_link_startup_notify,
.phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
};
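These notify hooks are driven from the UFS core rather than called directly; roughly, and assuming the standard vops dispatch wrappers in ufshcd.h, each hook fires twice per operation:

	static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status)
	{
		if (hba->vops && hba->vops->hce_enable_notify)
			return hba->vops->hce_enable_notify(hba, status);

		return 0;
	}

	/* in the core's controller-enable path, schematically: */
	ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
	/* ... set the HCE bit and poll until the controller is active ... */
	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

That is why both new Cadence handlers simply return early on anything other than PRE_CHANGE.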
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index f4d1dca962c4..6bbb1679bb91 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -447,13 +447,11 @@ static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
{
- struct resource *mem_res;
struct device *dev = host->hba->dev;
struct platform_device *pdev = to_platform_device(dev);
/* get resource of ufs sys ctrl */
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res);
+ host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(host->ufs_sys_ctrl))
return PTR_ERR(host->ufs_sys_ctrl);
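devm_platform_ioremap_resource() folds the two-step lookup-and-map pattern removed here into a single call; the helper in drivers/base/platform.c is essentially:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}

so the conversion is behavior-preserving, including the error encoding via ERR_PTR() when the resource is missing.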
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index ee4b1da1e223..a5b71487a206 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>
#include "ufshcd.h"
@@ -800,7 +801,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
- u32 val;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_dev_params ufs_qcom_cap;
int ret = 0;
@@ -869,8 +869,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ret = -EINVAL;
}
- val = ~(MAX_U32 << dev_req_params->lane_tx);
-
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
@@ -1140,6 +1138,15 @@ static int ufs_qcom_init(struct ufs_hba *hba)
}
}
+ host->device_reset = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(host->device_reset)) {
+ err = PTR_ERR(host->device_reset);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to acquire reset gpio: %d\n", err);
+ goto out_variant_clear;
+ }
+
err = ufs_qcom_bus_register(host);
if (err)
goto out_variant_clear;
@@ -1546,12 +1553,37 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
}
/**
+ * ufs_qcom_device_reset() - toggle the (optional) device reset line
+ * @hba: per-adapter instance
+ *
+ * Toggles the (optional) reset line to reset the attached device.
+ */
+static void ufs_qcom_device_reset(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ /* reset gpio is optional */
+ if (!host->device_reset)
+ return;
+
+ /*
+ * The UFS device shall detect reset pulses of 1us, sleep for 10us to
+ * be on the safe side.
+ */
+ gpiod_set_value_cansleep(host->device_reset, 1);
+ usleep_range(10, 15);
+
+ gpiod_set_value_cansleep(host->device_reset, 0);
+ usleep_range(10, 15);
+}
+
+/**
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
* The variant operations configure the necessary controller and PHY
* handshake during initialization.
*/
-static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
+static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.name = "qcom",
.init = ufs_qcom_init,
.exit = ufs_qcom_exit,
@@ -1565,6 +1597,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.suspend = ufs_qcom_suspend,
.resume = ufs_qcom_resume,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
+ .device_reset = ufs_qcom_device_reset,
};
/**
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 001915d1e0e4..d401f174bb70 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -195,6 +195,8 @@ struct ufs_qcom_testbus {
u8 select_minor;
};
+struct gpio_desc;
+
struct ufs_qcom_host {
/*
* Set this capability if host controller supports the QUniPro mode
@@ -232,6 +234,8 @@ struct ufs_qcom_host {
struct ufs_qcom_testbus testbus;
struct reset_controller_dev rcdev;
+
+ struct gpio_desc *device_reset;
};
static inline u32
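The reset line is optional by contract: devm_gpiod_get_optional() returns NULL, not an error, when no "reset" GPIO is described for the device, which is why ufs_qcom_device_reset() can bail out on a NULL descriptor. A minimal consumer sketch of that contract:

	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);	/* real failure, possibly -EPROBE_DEFER */
	if (!reset)
		return 0;		/* property absent: nothing to toggle */

	gpiod_set_value_cansleep(reset, 1);	/* assert reset */
	usleep_range(10, 15);			/* > 1us device requirement */
	gpiod_set_value_cansleep(reset, 0);	/* deassert */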
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index f478685122ff..969a36b15897 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -571,9 +571,10 @@ static ssize_t _name##_show(struct device *dev, \
int ret; \
int desc_len = QUERY_DESC_MAX_SIZE; \
u8 *desc_buf; \
+ \
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
- if (!desc_buf) \
- return -ENOMEM; \
+ if (!desc_buf) \
+ return -ENOMEM; \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
0, 0, desc_buf, &desc_len); \
@@ -582,14 +583,13 @@ static ssize_t _name##_show(struct device *dev, \
goto out; \
} \
index = desc_buf[DEVICE_DESC_PARAM##_pname]; \
- memset(desc_buf, 0, QUERY_DESC_MAX_SIZE); \
- if (ufshcd_read_string_desc(hba, index, desc_buf, \
- QUERY_DESC_MAX_SIZE, true)) { \
- ret = -EINVAL; \
+ kfree(desc_buf); \
+ desc_buf = NULL; \
+ ret = ufshcd_read_string_desc(hba, index, &desc_buf, \
+ SD_ASCII_STD); \
+ if (ret < 0) \
goto out; \
- } \
- ret = snprintf(buf, PAGE_SIZE, "%s\n", \
- desc_buf + QUERY_DESC_HDR_SIZE); \
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \
out: \
kfree(desc_buf); \
return ret; \
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 99a9c4d16f6b..3327981ef894 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -541,7 +541,7 @@ struct ufs_dev_info {
*/
struct ufs_dev_desc {
u16 wmanufacturerid;
- char model[MAX_MODEL_LEN + 1];
+ u8 *model;
};
/**
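With model now heap-allocated, the descriptor gains explicit ownership rules; a sketch of the intended lifecycle, matching the ufshcd_probe_hba() changes further down:

	struct ufs_dev_desc card = {0};

	err = ufs_get_device_desc(hba, &card);	/* allocates card.model */
	if (!err) {
		ufs_fixup_device_setup(hba, &card);
		ufs_put_device_desc(&card);	/* kfree + NULL the model */
	}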
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index d7d521b394c3..8d40dc918f4e 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -391,12 +391,10 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
{
struct ufs_hba *hba;
void __iomem *mmio_base;
- struct resource *mem_res;
int irq, err;
struct device *dev = &pdev->dev;
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio_base = devm_ioremap_resource(dev, mem_res);
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio_base)) {
err = PTR_ERR(mmio_base);
goto out;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 029da74bb2f5..034dd9cb9ec8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -299,16 +299,6 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
scsi_block_requests(hba->host);
}
-/* replace non-printable or non-ASCII characters with spaces */
-static inline void ufshcd_remove_non_printable(char *val)
-{
- if (!val)
- return;
-
- if (*val < 0x20 || *val > 0x7e)
- *val = ' ';
-}
-
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
const char *str)
{
@@ -390,24 +380,25 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
}
}
-static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
- struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+static void ufshcd_print_err_hist(struct ufs_hba *hba,
+ struct ufs_err_reg_hist *err_hist,
+ char *err_name)
{
int i;
bool found = false;
- for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
- int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH;
+ for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) {
+ int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH;
if (err_hist->reg[p] == 0)
continue;
- dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
+ dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
found = true;
}
if (!found)
- dev_err(hba->dev, "No record of %s uic errors\n", err_name);
+ dev_err(hba->dev, "No record of %s errors\n", err_name);
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
@@ -423,11 +414,22 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
hba->ufs_stats.hibern8_exit_cnt);
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
- ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err,
+ "auto_hibern8_err");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err,
+ "link_startup_fail");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err,
+ "suspend_fail");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
+ ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
ufshcd_print_clk_freqs(hba);
@@ -3199,7 +3201,7 @@ out:
static inline int ufshcd_read_desc(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
- u8 *buf,
+ void *buf,
u32 size)
{
return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
@@ -3218,48 +3220,77 @@ static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
}
/**
+ * struct uc_string_id - unicode string
+ *
+ * @len: size of this descriptor, including the header
+ * @type: descriptor type
+ * @uc: unicode string characters
+ */
+struct uc_string_id {
+ u8 len;
+ u8 type;
+ wchar_t uc[0];
+} __packed;
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline char ufshcd_remove_non_printable(u8 ch)
+{
+ return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
+}
+
+/**
* ufshcd_read_string_desc - read string descriptor
* @hba: pointer to adapter instance
* @desc_index: descriptor index
- * @buf: pointer to buffer where descriptor would be read
- * @size: size of buf
+ * @buf: pointer to the buffer where the descriptor is returned;
+ * the caller must free it
 * @ascii: if true convert from unicode to ascii characters
+ * and return a null-terminated string
*
- * Return 0 in case of success, non-zero otherwise
+ * Return:
+ * * string size on success.
+ * * -ENOMEM: on allocation failure
+ * * -EINVAL: on a wrong parameter
*/
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
- u8 *buf, u32 size, bool ascii)
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ u8 **buf, bool ascii)
{
- int err = 0;
+ struct uc_string_id *uc_str;
+ u8 *str;
+ int ret;
- err = ufshcd_read_desc(hba,
- QUERY_DESC_IDN_STRING, desc_index, buf, size);
+ if (!buf)
+ return -EINVAL;
- if (err) {
- dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
- __func__, QUERY_REQ_RETRIES, err);
+ uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
+ if (!uc_str)
+ return -ENOMEM;
+
+ ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING,
+ desc_index, uc_str,
+ QUERY_DESC_MAX_SIZE);
+ if (ret < 0) {
+ dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
+ QUERY_REQ_RETRIES, ret);
+ str = NULL;
+ goto out;
+ }
+
+ if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
+ dev_dbg(hba->dev, "String Desc is of zero length\n");
+ str = NULL;
+ ret = 0;
goto out;
}
if (ascii) {
- int desc_len;
- int ascii_len;
+ ssize_t ascii_len;
int i;
- char *buff_ascii;
-
- desc_len = buf[0];
/* remove header and divide by 2 to move from UTF16 to UTF8 */
- ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
- if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
- dev_err(hba->dev, "%s: buffer allocated size is too small\n",
- __func__);
- err = -ENOMEM;
- goto out;
- }
-
- buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
- if (!buff_ascii) {
- err = -ENOMEM;
+ ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+ str = kzalloc(ascii_len, GFP_KERNEL);
+ if (!str) {
+ ret = -ENOMEM;
goto out;
}
@@ -3267,22 +3298,28 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
* the descriptor contains string in UTF16 format
* we need to convert to utf-8 so it can be displayed
*/
- utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
- desc_len - QUERY_DESC_HDR_SIZE,
- UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+ ret = utf16s_to_utf8s(uc_str->uc,
+ uc_str->len - QUERY_DESC_HDR_SIZE,
+ UTF16_BIG_ENDIAN, str, ascii_len);
/* replace non-printable or non-ASCII characters with spaces */
- for (i = 0; i < ascii_len; i++)
- ufshcd_remove_non_printable(&buff_ascii[i]);
+ for (i = 0; i < ret; i++)
+ str[i] = ufshcd_remove_non_printable(str[i]);
- memset(buf + QUERY_DESC_HDR_SIZE, 0,
- size - QUERY_DESC_HDR_SIZE);
- memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
- buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
- kfree(buff_ascii);
+ str[ret++] = '\0';
+
+ } else {
+ str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
+ if (!str) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = uc_str->len;
}
out:
- return err;
+ *buf = str;
+ kfree(uc_str);
+ return ret;
}
/**
@@ -4214,12 +4251,6 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
int retry;
- /*
- * msleep of 1 and 5 used in this function might result in msleep(20),
- * but it was necessary to send the UFS FPGA to reset mode during
- * development and testing of this driver. msleep can be changed to
- * mdelay and retry count can be reduced based on the controller.
- */
if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
ufshcd_hba_stop(hba, true);
@@ -4242,7 +4273,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
* instruction might be read back.
* This delay can be changed based on the controller.
*/
- msleep(1);
+ usleep_range(1000, 1100);
/* wait for the host controller to complete initialization */
retry = 10;
@@ -4254,7 +4285,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
"Controller enable failed\n");
return -EIO;
}
- msleep(5);
+ usleep_range(5000, 5100);
}
/* enable UIC related interrupts */
@@ -4326,6 +4357,14 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
return ufshcd_disable_tx_lcc(hba, true);
}
+static void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist,
+ u32 reg)
+{
+ reg_hist->reg[reg_hist->pos] = reg;
+ reg_hist->tstamp[reg_hist->pos] = ktime_get();
+ reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH;
+}
+
/**
* ufshcd_link_startup - Initialize unipro link startup
* @hba: per adapter instance
@@ -4353,6 +4392,8 @@ link_startup:
/* check if device is detected by inter-connect layer */
if (!ret && !ufshcd_is_device_present(hba)) {
+ ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ 0);
dev_err(hba->dev, "%s: Device not present\n", __func__);
ret = -ENXIO;
goto out;
@@ -4363,13 +4404,19 @@ link_startup:
* but we can't be sure if the link is up until link startup
* succeeds. So reset the local Uni-Pro and try again.
*/
- if (ret && ufshcd_hba_enable(hba))
+ if (ret && ufshcd_hba_enable(hba)) {
+ ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ (u32)ret);
goto out;
+ }
} while (ret && retries--);
- if (ret)
+ if (ret) {
/* failed to get the link up... retire */
+ ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err,
+ (u32)ret);
goto out;
+ }
if (link_startup_again) {
link_startup_again = false;
@@ -5345,14 +5392,6 @@ out:
pm_runtime_put_sync(hba->dev);
}
-static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
- u32 reg)
-{
- reg_hist->reg[reg_hist->pos] = reg;
- reg_hist->tstamp[reg_hist->pos] = ktime_get();
- reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
-}
-
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
@@ -5371,13 +5410,13 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
* must be checked but this error is handled separately.
*/
dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+ ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if (reg)
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+ ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
@@ -5393,19 +5432,19 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
if (reg) {
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
+ ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
if (reg) {
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
+ ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
if (reg) {
- ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
+ ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
}
@@ -5438,8 +5477,10 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
- if (hba->errors & INT_FATAL_ERRORS)
+ if (hba->errors & INT_FATAL_ERRORS) {
+ ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
queue_eh_work = true;
+ }
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
@@ -5454,6 +5495,8 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
"Enter" : "Exit",
hba->errors, ufshcd_get_upmcrs(hba));
+ ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
+ hba->errors);
queue_eh_work = true;
}
@@ -5652,13 +5695,12 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- __clear_bit(free_slot, &hba->outstanding_tasks);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
}
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __clear_bit(free_slot, &hba->outstanding_tasks);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
clear_bit(free_slot, &hba->tm_condition);
ufshcd_put_tm_slot(hba, free_slot);
wake_up(&hba->tm_tag_wq);
@@ -5941,6 +5983,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
out:
hba->req_abort_count = 0;
+ ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err);
if (!err) {
err = SUCCESS;
} else {
@@ -6034,6 +6077,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
*/
scsi_print_command(hba->lrb[tag].cmd);
if (!hba->req_abort_count) {
+ ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0);
ufshcd_print_host_regs(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
@@ -6169,7 +6213,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
-
+ ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err);
return err;
}
@@ -6189,6 +6233,9 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
int retries = MAX_HOST_RESET_RETRIES;
do {
+ /* Reset the attached device */
+ ufshcd_vops_device_reset(hba);
+
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
@@ -6453,6 +6500,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
u8 model_index;
u8 *desc_buf;
+ if (!dev_desc)
+ return -EINVAL;
+
buff_len = max_t(size_t, hba->desc_size.dev_desc,
QUERY_DESC_MAX_SIZE + 1);
desc_buf = kmalloc(buff_len, GFP_KERNEL);
@@ -6476,31 +6526,31 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
-
- /* Zero-pad entire buffer for string termination. */
- memset(desc_buf, 0, buff_len);
-
- err = ufshcd_read_string_desc(hba, model_index, desc_buf,
- QUERY_DESC_MAX_SIZE, true/*ASCII*/);
- if (err) {
+ err = ufshcd_read_string_desc(hba, model_index,
+ &dev_desc->model, SD_ASCII_STD);
+ if (err < 0) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
goto out;
}
- desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
- strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
- min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
- MAX_MODEL_LEN));
-
- /* Null terminate the model string */
- dev_desc->model[MAX_MODEL_LEN] = '\0';
+ /*
+	 * ufshcd_read_string_desc returns the size of the string, so
+	 * reset the error value
+ */
+ err = 0;
out:
kfree(desc_buf);
return err;
}
+static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc)
+{
+ kfree(dev_desc->model);
+ dev_desc->model = NULL;
+}
+
static void ufs_fixup_device_setup(struct ufs_hba *hba,
struct ufs_dev_desc *dev_desc)
{
@@ -6509,8 +6559,9 @@ static void ufs_fixup_device_setup(struct ufs_hba *hba,
for (f = ufs_fixups; f->quirk; f++) {
if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
- (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
- !strcmp(f->card.model, UFS_ANY_MODEL)))
+ ((dev_desc->model &&
+ STR_PRFX_EQUAL(f->card.model, dev_desc->model)) ||
+ !strcmp(f->card.model, UFS_ANY_MODEL)))
hba->dev_quirks |= f->quirk;
}
}
@@ -6681,17 +6732,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
- int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
-
hba->ufs_stats.hibern8_exit_cnt = 0;
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
-
- memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
- memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
- memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
- memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
- memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
-
hba->req_abort_count = 0;
}
@@ -6861,6 +6903,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
}
ufs_fixup_device_setup(hba, &card);
+ ufs_put_device_desc(&card);
+
ufshcd_tune_unipro_params(hba);
/* UFS device is also active now */
@@ -7823,6 +7867,8 @@ enable_gating:
ufshcd_release(hba);
out:
hba->pm_op_in_progress = 0;
+ if (ret)
+ ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret);
return ret;
}
@@ -7925,6 +7971,8 @@ disable_irq_and_vops_clks:
ufshcd_setup_clocks(hba, false);
out:
hba->pm_op_in_progress = 0;
+ if (ret)
+ ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret);
return ret;
}
@@ -8324,6 +8372,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto exit_gating;
}
+ /* Reset the attached device */
+ ufshcd_vops_device_reset(hba);
+
/* Host controller enable */
err = ufshcd_hba_enable(hba);
if (err) {
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 994d73d03207..c94cfda52829 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -298,6 +298,7 @@ struct ufs_pwr_mode_info {
* @resume: called during host controller PM callback
* @dbg_register_dump: used to dump controller debug information
* @phy_initialization: used to initialize phys
+ * @device_reset: called to issue a reset pulse on the UFS device
*/
struct ufs_hba_variant_ops {
const char *name;
@@ -326,6 +327,7 @@ struct ufs_hba_variant_ops {
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
void (*dbg_register_dump)(struct ufs_hba *hba);
int (*phy_initialization)(struct ufs_hba *);
+ void (*device_reset)(struct ufs_hba *hba);
};
/* clock gating state */
@@ -412,17 +414,17 @@ struct ufs_init_prefetch {
u32 icc_level;
};
-#define UIC_ERR_REG_HIST_LENGTH 8
+#define UFS_ERR_REG_HIST_LENGTH 8
/**
- * struct ufs_uic_err_reg_hist - keeps history of uic errors
+ * struct ufs_err_reg_hist - keeps history of errors
* @pos: index to indicate cyclic buffer position
* @reg: cyclic buffer for registers value
* @tstamp: cyclic buffer for time stamp
*/
-struct ufs_uic_err_reg_hist {
+struct ufs_err_reg_hist {
int pos;
- u32 reg[UIC_ERR_REG_HIST_LENGTH];
- ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
+ u32 reg[UFS_ERR_REG_HIST_LENGTH];
+ ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
};
/**
@@ -436,15 +438,37 @@ struct ufs_uic_err_reg_hist {
* @nl_err: tracks nl-uic errors
* @tl_err: tracks tl-uic errors
* @dme_err: tracks dme errors
+ * @auto_hibern8_err: tracks auto-hibernate errors
+ * @fatal_err: tracks fatal errors
+ * @link_startup_err: tracks link-startup errors
+ * @resume_err: tracks resume errors
+ * @suspend_err: tracks suspend errors
+ * @dev_reset: tracks device reset events
+ * @host_reset: tracks host reset events
+ * @task_abort: tracks task abort events
*/
struct ufs_stats {
u32 hibern8_exit_cnt;
ktime_t last_hibern8_exit_tstamp;
- struct ufs_uic_err_reg_hist pa_err;
- struct ufs_uic_err_reg_hist dl_err;
- struct ufs_uic_err_reg_hist nl_err;
- struct ufs_uic_err_reg_hist tl_err;
- struct ufs_uic_err_reg_hist dme_err;
+
+ /* uic specific errors */
+ struct ufs_err_reg_hist pa_err;
+ struct ufs_err_reg_hist dl_err;
+ struct ufs_err_reg_hist nl_err;
+ struct ufs_err_reg_hist tl_err;
+ struct ufs_err_reg_hist dme_err;
+
+ /* fatal errors */
+ struct ufs_err_reg_hist auto_hibern8_err;
+ struct ufs_err_reg_hist fatal_err;
+ struct ufs_err_reg_hist link_startup_err;
+ struct ufs_err_reg_hist resume_err;
+ struct ufs_err_reg_hist suspend_err;
+
+ /* abnormal events */
+ struct ufs_err_reg_hist dev_reset;
+ struct ufs_err_reg_hist host_reset;
+ struct ufs_err_reg_hist task_abort;
};
/**
@@ -891,8 +915,11 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
- u8 *buf, u32 size, bool ascii);
+
+#define SD_ASCII_STD true
+#define SD_RAW false
+int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
+ u8 **buf, bool ascii);
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
@@ -1045,6 +1072,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
hba->vops->dbg_register_dump(hba);
}
+static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->device_reset)
+ hba->vops->device_reset(hba);
+}
+
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/*
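A minimal usage sketch of the reworked string-descriptor API: the helper now allocates the result itself and returns its length, so callers no longer size a buffer up front but must free what they are handed:

	u8 *model = NULL;
	int len;

	len = ufshcd_read_string_desc(hba, model_index, &model, SD_ASCII_STD);
	if (len < 0)
		return len;	/* -EINVAL, -ENOMEM, or a query error */

	dev_info(hba->dev, "product name: %s\n", model);
	kfree(model);

Passing SD_RAW instead of SD_ASCII_STD returns the raw UTF-16 descriptor, header included.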
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 297e1076e571..bfec84aacd90 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -30,6 +30,8 @@
#include <linux/seqlock.h>
#include <linux/blk-mq-virtio.h>
+#include "sd.h"
+
#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2
@@ -324,6 +326,36 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
scsi_device_put(sdev);
}
+static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ unsigned char scsi_cmd[MAX_COMMAND_SIZE];
+ int result, inquiry_len, inq_result_len = 256;
+ char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
+
+ if (!inq_result)
+ return;
+
+ shost_for_each_device(sdev, shost) {
+ inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
+
+ memset(scsi_cmd, 0, sizeof(scsi_cmd));
+ scsi_cmd[0] = INQUIRY;
+ scsi_cmd[4] = (unsigned char) inquiry_len;
+
+ memset(inq_result, 0, inq_result_len);
+
+ result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
+ inq_result, inquiry_len, NULL,
+ SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+ if (result == 0 && inq_result[0] >> 5) {
+ /* PQ indicates the LUN is not attached */
+ scsi_remove_device(sdev);
+ }
+ }
+
+ kfree(inq_result);
+}
+
static void virtscsi_handle_event(struct work_struct *work)
{
struct virtio_scsi_event_node *event_node =
@@ -335,6 +367,7 @@ static void virtscsi_handle_event(struct work_struct *work)
cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
event->event &= ~cpu_to_virtio32(vscsi->vdev,
VIRTIO_SCSI_T_EVENTS_MISSED);
+ virtscsi_rescan_hotunplug(vscsi);
scsi_scan_host(virtio_scsi_host(vscsi->vdev));
}
@@ -369,14 +402,7 @@ static void virtscsi_event_done(struct virtqueue *vq)
virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
};
-/**
- * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
- * @vq : the struct virtqueue we're talking about
- * @cmd : command structure
- * @req_size : size of the request buffer
- * @resp_size : size of the response buffer
- */
-static int virtscsi_add_cmd(struct virtqueue *vq,
+static int __virtscsi_add_cmd(struct virtqueue *vq,
struct virtio_scsi_cmd *cmd,
size_t req_size, size_t resp_size)
{
@@ -421,17 +447,39 @@ static int virtscsi_add_cmd(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}
-static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
+static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
+{
+ bool needs_kick;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vq->vq_lock, flags);
+ needs_kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irqrestore(&vq->vq_lock, flags);
+
+ if (needs_kick)
+ virtqueue_notify(vq->vq);
+}
+
+/**
+ * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
+ * @vq : the struct virtio_scsi_vq we're talking about
+ * @cmd : command structure
+ * @req_size : size of the request buffer
+ * @resp_size : size of the response buffer
+ * @kick : whether to kick the virtqueue immediately
+ */
+static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
struct virtio_scsi_cmd *cmd,
- size_t req_size, size_t resp_size)
+ size_t req_size, size_t resp_size,
+ bool kick)
{
unsigned long flags;
int err;
bool needs_kick = false;
spin_lock_irqsave(&vq->vq_lock, flags);
- err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
- if (!err)
+ err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
+ if (!err && kick)
needs_kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->vq_lock, flags);
@@ -496,6 +544,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *shost,
struct virtio_scsi *vscsi = shost_priv(shost);
struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ bool kick;
unsigned long flags;
int req_size;
int ret;
@@ -525,7 +574,8 @@ static int virtscsi_queuecommand(struct Scsi_Host *shost,
req_size = sizeof(cmd->req.cmd);
}
- ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+ kick = (sc->flags & SCMD_LAST) != 0;
+ ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
if (ret == -EIO) {
cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
spin_lock_irqsave(&req_vq->vq_lock, flags);
@@ -543,8 +593,8 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
int ret = FAILED;
cmd->comp = &comp;
- if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
- sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
+ if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
goto out;
wait_for_completion(&comp);
@@ -658,6 +708,13 @@ static int virtscsi_map_queues(struct Scsi_Host *shost)
return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
}
+static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
+{
+ struct virtio_scsi *vscsi = shost_priv(shost);
+
+ virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
+}
+
/*
* The host guarantees to respond to each command, although I/O
* latencies might be higher than on bare metal. Reset the timer
@@ -675,6 +732,7 @@ static struct scsi_host_template virtscsi_host_template = {
.this_id = -1,
.cmd_size = sizeof(struct virtio_scsi_cmd),
.queuecommand = virtscsi_queuecommand,
+ .commit_rqs = virtscsi_commit_rqs,
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fb7b289fa09f..f81046f0e68a 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1854,6 +1854,7 @@ round_4(unsigned int x)
case 1: --x;
break;
case 2: ++x;
+ /* fall through */
case 3: ++x;
}
return x;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 661bb9358364..35be1be87d2a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1712,6 +1712,24 @@ static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
return 0;
}
+static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
+{
+ struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+
+ if (!tcmu_kern_cmd_reply_supported)
+ return;
+
+ if (udev->nl_reply_supported <= 0)
+ return;
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+
+ list_del(&nl_cmd->nl_list);
+ memset(nl_cmd, 0, sizeof(*nl_cmd));
+
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+}
+
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
@@ -1792,6 +1810,8 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
if (ret == 0 ||
(ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
return tcmu_wait_genl_cmd_reply(udev);
+ else
+ tcmu_destroy_genl_cmd_reply(udev);
return ret;
}
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 98d904961b33..10f81629b9ce 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -6,6 +6,8 @@
#ifndef _NVME_FC_DRIVER_H
#define _NVME_FC_DRIVER_H 1
+#include <linux/scatterlist.h>
+
/*
* ********************** LLDD FC-NVME Host API ********************
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 76ed5e4acd38..91bd749a02f7 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -57,6 +57,7 @@ struct scsi_pointer {
#define SCMD_TAGGED (1 << 0)
#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
#define SCMD_INITIALIZED (1 << 2)
+#define SCMD_LAST (1 << 3)
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
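SCMD_LAST mirrors the block layer's bd->last batching hint. A sketch of the midlayer side (the scsi_lib.c half of this interface is not part of this excerpt), assuming the usual blk-mq dispatch path:

	/* in scsi_queue_rq(), before handing the command to the LLD: */
	if (bd->last)
		cmd->flags |= SCMD_LAST;

A driver that implements commit_rqs may then defer its doorbell until it sees a command with SCMD_LAST set, as virtio_scsi does above.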
diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
index e03bd9d41fa8..7b196d234626 100644
--- a/include/scsi/scsi_dbg.h
+++ b/include/scsi/scsi_dbg.h
@@ -6,8 +6,6 @@ struct scsi_cmnd;
struct scsi_device;
struct scsi_sense_hdr;
-#define SCSI_LOG_BUFSIZE 128
-
extern void scsi_print_command(struct scsi_cmnd *);
extern size_t __scsi_format_command(char *, size_t,
const unsigned char *, size_t);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index cc139dbd71e5..31e0d6ca1eba 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -80,8 +80,10 @@ struct scsi_host_template {
* command block to the LLDD. When the driver finished
* processing the command the done callback is invoked.
*
- * If queuecommand returns 0, then the HBA has accepted the
- * command. The done() function must be called on the command
+ * If queuecommand returns 0, then the driver has accepted the
+ * command. It must also push it to the HBA if the scsi_cmnd
+ * flag SCMD_LAST is set, or if the driver does not implement
+ * commit_rqs. The done() function must be called on the command
* when the driver has finished with it. (you may call done on the
* command before queuecommand returns, but in this case you
* *must* return 0 from queuecommand).
@@ -110,6 +112,16 @@ struct scsi_host_template {
int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
/*
+ * The commit_rqs function is used to trigger a hardware
+ * doorbell after some requests have been queued with
+	 * queuecommand, in case an error is encountered before the
+	 * request with SCMD_LAST set could be sent.
+ *
+ * STATUS: OPTIONAL
+ */
+ void (*commit_rqs)(struct Scsi_Host *, u16);
+
+ /*
* This is an error handling strategy routine. You don't need to
* define one of these if you don't want to - there is a default
* routine that is present that should work in most cases. For those
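For completeness, a sketch of the glue that makes the new hook reachable, assuming the usual blk-mq wiring in scsi_lib.c: the midlayer forwards the block layer's ->commit_rqs to the host template, and installs the forwarder only for hosts that define the hook:

	static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
	{
		struct request_queue *q = hctx->queue;
		struct scsi_device *sdev = q->queuedata;
		struct Scsi_Host *shost = sdev->host;

		shost->hostt->commit_rqs(shost, hctx->queue_num);
	}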
diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 52f32a60d056..3ae65e93235c 100644
--- a/include/uapi/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
@@ -8,6 +8,8 @@
#ifndef SCSI_BSG_FC_H
#define SCSI_BSG_FC_H
+#include <linux/types.h>
+
/*
* This file is intended to be included by both kernel and user space
*/
@@ -66,10 +68,10 @@
* with the transport upon completion of the login.
*/
struct fc_bsg_host_add_rport {
- uint8_t reserved;
+ __u8 reserved;
/* FC Address Identifier of the remote port to login to */
- uint8_t port_id[3];
+ __u8 port_id[3];
};
/* Response:
@@ -87,10 +89,10 @@ struct fc_bsg_host_add_rport {
* remain logged in with the remote port.
*/
struct fc_bsg_host_del_rport {
- uint8_t reserved;
+ __u8 reserved;
/* FC Address Identifier of the remote port to logout of */
- uint8_t port_id[3];
+ __u8 port_id[3];
};
/* Response:
@@ -111,10 +113,10 @@ struct fc_bsg_host_els {
* ELS Command Code being sent (must be the same as byte 0
* of the payload)
*/
- uint8_t command_code;
+ __u8 command_code;
/* FC Address Identifier of the remote port to send the ELS to */
- uint8_t port_id[3];
+ __u8 port_id[3];
};
/* Response:
@@ -151,14 +153,14 @@ struct fc_bsg_ctels_reply {
* Note: x_RJT/BSY status will indicate that the rjt_data field
* is valid and contains the reason/explanation values.
*/
- uint32_t status; /* See FC_CTELS_STATUS_xxx */
+ __u32 status; /* See FC_CTELS_STATUS_xxx */
/* valid if status is not FC_CTELS_STATUS_OK */
struct {
- uint8_t action; /* fragment_id for CT REJECT */
- uint8_t reason_code;
- uint8_t reason_explanation;
- uint8_t vendor_unique;
+ __u8 action; /* fragment_id for CT REJECT */
+ __u8 reason_code;
+ __u8 reason_explanation;
+ __u8 vendor_unique;
} rjt_data;
};
@@ -174,17 +176,17 @@ struct fc_bsg_ctels_reply {
* and whether to tear it down after the request.
*/
struct fc_bsg_host_ct {
- uint8_t reserved;
+ __u8 reserved;
/* FC Address Identifier of the remote port to send the ELS to */
- uint8_t port_id[3];
+ __u8 port_id[3];
/*
* We need words 0-2 of the generic preamble for the LLD's
*/
- uint32_t preamble_word0; /* revision & IN_ID */
- uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
- uint32_t preamble_word2; /* Cmd Code, Max Size */
+ __u32 preamble_word0; /* revision & IN_ID */
+ __u32 preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
+ __u32 preamble_word2; /* Cmd Code, Max Size */
};
/* Response:
@@ -204,17 +206,17 @@ struct fc_bsg_host_vendor {
* Identifies the vendor that the message is formatted for. This
* should be the recipient of the message.
*/
- uint64_t vendor_id;
+ __u64 vendor_id;
/* start of vendor command area */
- uint32_t vendor_cmd[0];
+ __u32 vendor_cmd[0];
};
/* Response:
*/
struct fc_bsg_host_vendor_reply {
/* start of vendor response area */
- uint32_t vendor_rsp[0];
+ __u32 vendor_rsp[0];
};
@@ -233,7 +235,7 @@ struct fc_bsg_rport_els {
* ELS Command Code being sent (must be the same as
* byte 0 of the payload)
*/
- uint8_t els_code;
+ __u8 els_code;
};
/* Response:
@@ -251,9 +253,9 @@ struct fc_bsg_rport_ct {
/*
* We need words 0-2 of the generic preamble for the LLD's
*/
- uint32_t preamble_word0; /* revision & IN_ID */
- uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
- uint32_t preamble_word2; /* Cmd Code, Max Size */
+ __u32 preamble_word0; /* revision & IN_ID */
+ __u32 preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */
+ __u32 preamble_word2; /* Cmd Code, Max Size */
};
/* Response:
*
@@ -265,7 +267,7 @@ struct fc_bsg_rport_ct {
/* request (CDB) structure of the sg_io_v4 */
struct fc_bsg_request {
- uint32_t msgcode;
+ __u32 msgcode;
union {
struct fc_bsg_host_add_rport h_addrport;
struct fc_bsg_host_del_rport h_delrport;
@@ -289,10 +291,10 @@ struct fc_bsg_reply {
* msg and status fields. The per-msgcode reply structure
* will contain valid data.
*/
- uint32_t result;
+ __u32 result;
/* If there was reply_payload, how much was received? */
- uint32_t reply_payload_rcv_len;
+ __u32 reply_payload_rcv_len;
union {
struct fc_bsg_host_vendor_reply vendor_reply;
diff --git a/include/uapi/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h
index 5dd382054e45..1b1737c3c9d8 100644
--- a/include/uapi/scsi/scsi_netlink.h
+++ b/include/uapi/scsi/scsi_netlink.h
@@ -26,12 +26,12 @@
/* SCSI_TRANSPORT_MSG event message header */
struct scsi_nl_hdr {
- uint8_t version;
- uint8_t transport;
- uint16_t magic;
- uint16_t msgtype;
- uint16_t msglen;
-} __attribute__((aligned(sizeof(uint64_t))));
+ __u8 version;
+ __u8 transport;
+ __u16 magic;
+ __u16 msgtype;
+ __u16 msglen;
+} __attribute__((aligned(sizeof(__u64))));
/* scsi_nl_hdr->version value */
#define SCSI_NL_VERSION 1
@@ -75,10 +75,10 @@ struct scsi_nl_hdr {
*/
struct scsi_nl_host_vendor_msg {
struct scsi_nl_hdr snlh; /* must be 1st element ! */
- uint64_t vendor_id;
- uint16_t host_no;
- uint16_t vmsg_datalen;
-} __attribute__((aligned(sizeof(uint64_t))));
+ __u64 vendor_id;
+ __u16 host_no;
+ __u16 vmsg_datalen;
+} __attribute__((aligned(sizeof(__u64))));
/*
diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
index a39023579051..7535253f1a96 100644
--- a/include/uapi/scsi/scsi_netlink_fc.h
+++ b/include/uapi/scsi/scsi_netlink_fc.h
@@ -7,6 +7,7 @@
#ifndef SCSI_NETLINK_FC_H
#define SCSI_NETLINK_FC_H
+#include <linux/types.h>
#include <scsi/scsi_netlink.h>
/*
@@ -43,14 +44,14 @@
*/
struct fc_nl_event {
struct scsi_nl_hdr snlh; /* must be 1st element ! */
- uint64_t seconds;
- uint64_t vendor_id;
- uint16_t host_no;
- uint16_t event_datalen;
- uint32_t event_num;
- uint32_t event_code;
- uint32_t event_data;
-} __attribute__((aligned(sizeof(uint64_t))));
+ __u64 seconds;
+ __u64 vendor_id;
+ __u16 host_no;
+ __u16 event_datalen;
+ __u32 event_num;
+ __u32 event_code;
+ __u32 event_data;
+} __attribute__((aligned(sizeof(__u64))));
#endif /* SCSI_NETLINK_FC_H */