Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 2
-rw-r--r--  drivers/scsi/3w-sas.c | 4
-rw-r--r--  drivers/scsi/3w-xxxx.c | 14
-rw-r--r--  drivers/scsi/3w-xxxx.h | 2
-rw-r--r--  drivers/scsi/53c700.c | 3
-rw-r--r--  drivers/scsi/BusLogic.c | 35
-rw-r--r--  drivers/scsi/FlashPoint.c | 4
-rw-r--r--  drivers/scsi/Kconfig | 21
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/NCR5380.c | 94
-rw-r--r--  drivers/scsi/NCR5380.h | 13
-rw-r--r--  drivers/scsi/a100u2w.c | 2
-rw-r--r--  drivers/scsi/a2091.c | 78
-rw-r--r--  drivers/scsi/a3000.c | 72
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 47
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 26
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 4
-rw-r--r--  drivers/scsi/aacraid/linit.c | 25
-rw-r--r--  drivers/scsi/advansys.c | 22
-rw-r--r--  drivers/scsi/aha152x.c | 269
-rw-r--r--  drivers/scsi/aha1542.c | 23
-rw-r--r--  drivers/scsi/aha1740.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_pci.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_pci.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aicasm/aicasm.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dev.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 9
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 11
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 28
-rw-r--r--  drivers/scsi/arm/arm_scsi.h (renamed from drivers/scsi/arm/scsi.h) | 37
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 6
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 2
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 8
-rw-r--r--  drivers/scsi/arm/eesox.c | 8
-rw-r--r--  drivers/scsi/arm/fas216.c | 36
-rw-r--r--  drivers/scsi/arm/fas216.h | 4
-rw-r--r--  drivers/scsi/arm/oak.c | 2
-rw-r--r--  drivers/scsi/arm/powertec.c | 8
-rw-r--r--  drivers/scsi/arm/queue.c | 6
-rw-r--r--  drivers/scsi/atari_scsi.c | 6
-rw-r--r--  drivers/scsi/atp870u.c | 1
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 24
-rw-r--r--  drivers/scsi/bfa/bfad.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_attr.c | 28
-rw-r--r--  drivers/scsi/bfa/bfad_debugfs.c | 3
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 28
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 16
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 9
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 57
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 6
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 40
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 5
-rw-r--r--  drivers/scsi/ch.c | 10
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 30
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.h | 10
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 1
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 5
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 10
-rw-r--r--  drivers/scsi/cxlflash/main.c | 2
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.c | 1
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 4
-rw-r--r--  drivers/scsi/dc395x.c | 21
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 7
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 2
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 2
-rw-r--r--  drivers/scsi/dmx3191d.c | 2
-rw-r--r--  drivers/scsi/dpt/dpti_i2o.h | 441
-rw-r--r--  drivers/scsi/dpt/dpti_ioctl.h | 136
-rw-r--r--  drivers/scsi/dpt/dptsig.h | 336
-rw-r--r--  drivers/scsi/dpt/osd_defs.h | 79
-rw-r--r--  drivers/scsi/dpt/osd_util.h | 358
-rw-r--r--  drivers/scsi/dpt/sys_info.h | 417
-rw-r--r--  drivers/scsi/dpt_i2o.c | 3546
-rw-r--r--  drivers/scsi/dpti.h | 331
-rw-r--r--  drivers/scsi/elx/efct/efct_driver.c | 13
-rw-r--r--  drivers/scsi/elx/efct/efct_hw.c | 11
-rw-r--r--  drivers/scsi/elx/efct/efct_io.c | 3
-rw-r--r--  drivers/scsi/elx/efct/efct_lio.c | 3
-rw-r--r--  drivers/scsi/elx/libefc/efc_cmds.c | 4
-rw-r--r--  drivers/scsi/elx/libefc/efc_els.c | 12
-rw-r--r--  drivers/scsi/elx/libefc_sli/sli4.c | 16
-rw-r--r--  drivers/scsi/elx/libefc_sli/sli4.h | 20
-rw-r--r--  drivers/scsi/esas2r/atioctl.h | 1
-rw-r--r--  drivers/scsi/esas2r/esas2r_flash.c | 2
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 3
-rw-r--r--  drivers/scsi/esp_scsi.c | 4
-rw-r--r--  drivers/scsi/esp_scsi.h | 3
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 48
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 32
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 6
-rw-r--r--  drivers/scsi/fdomain.c | 64
-rw-r--r--  drivers/scsi/fnic/cq_desc.h | 14
-rw-r--r--  drivers/scsi/fnic/cq_enet_desc.h | 14
-rw-r--r--  drivers/scsi/fnic/cq_exch_desc.h | 14
-rw-r--r--  drivers/scsi/fnic/fcpio.h | 14
-rw-r--r--  drivers/scsi/fnic/fnic.h | 43
-rw-r--r--  drivers/scsi/fnic/fnic_attrs.c | 14
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 21
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 14
-rw-r--r--  drivers/scsi/fnic/fnic_fip.h | 14
-rw-r--r--  drivers/scsi/fnic/fnic_io.h | 14
-rw-r--r--  drivers/scsi/fnic/fnic_isr.c | 15
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 71
-rw-r--r--  drivers/scsi/fnic/fnic_res.c | 14
-rw-r--r--  drivers/scsi/fnic/fnic_res.h | 14
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 330
-rw-r--r--  drivers/scsi/fnic/fnic_stats.h | 18
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 18
-rw-r--r--  drivers/scsi/fnic/fnic_trace.h | 18
-rw-r--r--  drivers/scsi/fnic/rq_enet_desc.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_cq.c | 14
-rw-r--r--  drivers/scsi/fnic/vnic_cq.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_cq_copy.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_dev.c | 14
-rw-r--r--  drivers/scsi/fnic/vnic_dev.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_devcmd.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_intr.c | 14
-rw-r--r--  drivers/scsi/fnic/vnic_intr.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_nic.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_resource.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_rq.c | 15
-rw-r--r--  drivers/scsi/fnic/vnic_rq.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_scsi.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_stats.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_wq.c | 14
-rw-r--r--  drivers/scsi/fnic/vnic_wq.h | 14
-rw-r--r--  drivers/scsi/fnic/vnic_wq_copy.c | 15
-rw-r--r--  drivers/scsi/fnic/vnic_wq_copy.h | 14
-rw-r--r--  drivers/scsi/fnic/wq_enet_desc.h | 14
-rw-r--r--  drivers/scsi/g_NCR5380.c | 6
-rw-r--r--  drivers/scsi/gvp11.c | 110
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 17
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 1084
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 26
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 41
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 221
-rw-r--r--  drivers/scsi/hosts.c | 62
-rw-r--r--  drivers/scsi/hpsa.c | 14
-rw-r--r--  drivers/scsi/hptiop.c | 10
-rw-r--r--  drivers/scsi/hptiop.h | 8
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 96
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 2
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 13
-rw-r--r--  drivers/scsi/imm.c | 88
-rw-r--r--  drivers/scsi/imm.h | 5
-rw-r--r--  drivers/scsi/initio.c | 21
-rw-r--r--  drivers/scsi/initio.h | 9
-rw-r--r--  drivers/scsi/ipr.c | 17
-rw-r--r--  drivers/scsi/ips.c | 52
-rw-r--r--  drivers/scsi/isci/host.c | 6
-rw-r--r--  drivers/scsi/isci/init.c | 1
-rw-r--r--  drivers/scsi/isci/request.c | 24
-rw-r--r--  drivers/scsi/isci/request.h | 5
-rw-r--r--  drivers/scsi/isci/task.c | 41
-rw-r--r--  drivers/scsi/isci/task.h | 4
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 148
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 5
-rw-r--r--  drivers/scsi/libfc/fc_encode.h | 2
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 4
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 55
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 30
-rw-r--r--  drivers/scsi/libiscsi.c | 390
-rw-r--r--  drivers/scsi/libiscsi_tcp.c | 8
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 66
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 7
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 115
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 96
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 53
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 14
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 73
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 330
-rw-r--r--  drivers/scsi/libsas/sas_task.c | 14
-rw-r--r--  drivers/scsi/lpfc/Makefile | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 273
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 723
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 735
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 47
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 1754
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 103
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 2499
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 437
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 211
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 93
-rw-r--r--  drivers/scsi/lpfc/lpfc_ids.h | 34
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 975
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 205
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 185
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 210
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 210
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 1010
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 3380
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 60
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_vmid.c | 286
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 185
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h | 6
-rw-r--r--  drivers/scsi/mac53c94.c | 28
-rw-r--r--  drivers/scsi/mac53c94.h | 11
-rw-r--r--  drivers/scsi/mac_scsi.c | 9
-rw-r--r--  drivers/scsi/megaraid.c | 107
-rw-r--r--  drivers/scsi/megaraid.h | 23
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 7
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 15
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 116
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 27
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 12
-rw-r--r--  drivers/scsi/mesh.c | 26
-rw-r--r--  drivers/scsi/mesh.h | 11
-rw-r--r--  drivers/scsi/mpi3mr/Kconfig | 2
-rw-r--r--  drivers/scsi/mpi3mr/Makefile | 2
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 852
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_image.h | 65
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_init.h | 58
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_ioc.h | 177
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_pci.h | 16
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_sas.h | 17
-rw-r--r--  drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 39
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr.h | 586
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_app.c | 1860
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_debug.h | 187
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_fw.c | 2828
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 1760
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_transport.c | 3291
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 1
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 308
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 41
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 133
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 110
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 61
-rw-r--r--  drivers/scsi/mvme147.c | 16
-rw-r--r--  drivers/scsi/mvsas/mv_defs.h | 5
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 16
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 190
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 3
-rw-r--r--  drivers/scsi/mvumi.c | 9
-rw-r--r--  drivers/scsi/mvumi.h | 9
-rw-r--r--  drivers/scsi/myrb.c | 13
-rw-r--r--  drivers/scsi/myrs.c | 13
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 22
-rw-r--r--  drivers/scsi/ncr53c8xx.h | 6
-rw-r--r--  drivers/scsi/nsp32.c | 20
-rw-r--r--  drivers/scsi/nsp32.h | 9
-rw-r--r--  drivers/scsi/pcmcia/aha152x_stub.c | 9
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 249
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.h | 8
-rw-r--r--  drivers/scsi/pcmcia/nsp_debug.c | 2
-rw-r--r--  drivers/scsi/pcmcia/qlogic_stub.c | 9
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 45
-rw-r--r--  drivers/scsi/pm8001/Makefile | 7
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c | 90
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 575
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h | 5
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 96
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 601
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 94
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 671
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.h | 22
-rw-r--r--  drivers/scsi/pm8001/pm80xx_tracepoints.c | 10
-rw-r--r--  drivers/scsi/pm8001/pm80xx_tracepoints.h | 113
-rw-r--r--  drivers/scsi/pmcraid.c | 498
-rw-r--r--  drivers/scsi/pmcraid.h | 33
-rw-r--r--  drivers/scsi/ppa.c | 75
-rw-r--r--  drivers/scsi/qedf/qedf.h | 10
-rw-r--r--  drivers/scsi/qedf/qedf_attr.c | 2
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 31
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 51
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 10
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 70
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 23
-rw-r--r--  drivers/scsi/qla1280.c | 24
-rw-r--r--  drivers/scsi/qla1280.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 72
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 110
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 50
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 45
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 105
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 93
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.c | 615
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif_bsg.h | 110
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 31
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 287
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 365
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 98
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 141
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 104
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 77
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 35
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 20
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 216
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 168
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 16
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 23
-rw-r--r--  drivers/scsi/qlogicfas.c | 6
-rw-r--r--  drivers/scsi/qlogicfas408.c | 6
-rw-r--r--  drivers/scsi/qlogicpti.c | 3
-rw-r--r--  drivers/scsi/scsi.c | 117
-rw-r--r--  drivers/scsi/scsi.h | 46
-rw-r--r--  drivers/scsi/scsi_bsg.c | 45
-rw-r--r--  drivers/scsi/scsi_debug.c | 479
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 7
-rw-r--r--  drivers/scsi/scsi_error.c | 212
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 140
-rw-r--r--  drivers/scsi/scsi_lib.c | 335
-rw-r--r--  drivers/scsi/scsi_logging.c | 9
-rw-r--r--  drivers/scsi/scsi_pm.c | 3
-rw-r--r--  drivers/scsi/scsi_priv.h | 20
-rw-r--r--  drivers/scsi/scsi_proc.c | 4
-rw-r--r--  drivers/scsi/scsi_scan.c | 77
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 61
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 49
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 498
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 20
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 7
-rw-r--r--  drivers/scsi/scsicam.c | 12
-rw-r--r--  drivers/scsi/sd.c | 470
-rw-r--r--  drivers/scsi/sd.h | 49
-rw-r--r--  drivers/scsi/sd_dif.c | 8
-rw-r--r--  drivers/scsi/sd_zbc.c | 288
-rw-r--r--  drivers/scsi/sg.c | 183
-rw-r--r--  drivers/scsi/sgiwd93.c | 24
-rw-r--r--  drivers/scsi/smartpqi/Kconfig | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 45
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 954
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 21
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 5
-rw-r--r--  drivers/scsi/snic/cq_desc.h | 18
-rw-r--r--  drivers/scsi/snic/cq_enet_desc.h | 18
-rw-r--r--  drivers/scsi/snic/snic.h | 18
-rw-r--r--  drivers/scsi/snic/snic_attrs.c | 18
-rw-r--r--  drivers/scsi/snic/snic_ctl.c | 18
-rw-r--r--  drivers/scsi/snic/snic_debugfs.c | 18
-rw-r--r--  drivers/scsi/snic/snic_disc.c | 20
-rw-r--r--  drivers/scsi/snic/snic_disc.h | 18
-rw-r--r--  drivers/scsi/snic/snic_fwint.h | 20
-rw-r--r--  drivers/scsi/snic/snic_io.c | 18
-rw-r--r--  drivers/scsi/snic/snic_io.h | 18
-rw-r--r--  drivers/scsi/snic/snic_isr.c | 18
-rw-r--r--  drivers/scsi/snic/snic_main.c | 18
-rw-r--r--  drivers/scsi/snic/snic_res.c | 18
-rw-r--r--  drivers/scsi/snic/snic_res.h | 18
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 18
-rw-r--r--  drivers/scsi/snic/snic_stats.h | 18
-rw-r--r--  drivers/scsi/snic/snic_trc.c | 18
-rw-r--r--  drivers/scsi/snic/snic_trc.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_cq.c | 18
-rw-r--r--  drivers/scsi/snic/vnic_cq.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_cq_fw.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_dev.c | 18
-rw-r--r--  drivers/scsi/snic/vnic_dev.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_devcmd.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_intr.c | 18
-rw-r--r--  drivers/scsi/snic/vnic_intr.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_resource.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_snic.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_stats.h | 18
-rw-r--r--  drivers/scsi/snic/vnic_wq.c | 18
-rw-r--r--  drivers/scsi/snic/vnic_wq.h | 18
-rw-r--r--  drivers/scsi/snic/wq_enet_desc.h | 18
-rw-r--r--  drivers/scsi/sr.c | 200
-rw-r--r--  drivers/scsi/sr.h | 6
-rw-r--r--  drivers/scsi/sr_ioctl.c | 15
-rw-r--r--  drivers/scsi/sr_vendor.c | 4
-rw-r--r--  drivers/scsi/st.c | 43
-rw-r--r--  drivers/scsi/st.h | 1
-rw-r--r--  drivers/scsi/stex.c | 17
-rw-r--r--  drivers/scsi/storvsc_drv.c | 289
-rw-r--r--  drivers/scsi/sun3_scsi.c | 4
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 4
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r--  drivers/scsi/ufs/Kconfig | 210
-rw-r--r--  drivers/scsi/ufs/Makefile | 26
-rw-r--r--  drivers/scsi/ufs/cdns-pltfrm.c | 343
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210-pci.c | 150
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210-pltfrm.c | 107
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210.c | 317
-rw-r--r--  drivers/scsi/ufs/tc-dwc-g210.h | 16
-rw-r--r--  drivers/scsi/ufs/ti-j721e-ufs.c | 98
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.c | 238
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.h | 24
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c | 1629
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.h | 269
-rw-r--r--  drivers/scsi/ufs/ufs-fault-injection.c | 70
-rw-r--r--  drivers/scsi/ufs/ufs-fault-injection.h | 24
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c | 599
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.h | 104
-rw-r--r--  drivers/scsi/ufs/ufs-hwmon.c | 210
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek-trace.h | 36
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 1253
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.h | 145
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-ice.c | 245
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 1581
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 272
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 1267
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.h | 17
-rw-r--r--  drivers/scsi/ufs/ufs.h | 652
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.c | 227
-rw-r--r--  drivers/scsi/ufs/ufs_bsg.h | 23
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h | 125
-rw-r--r--  drivers/scsi/ufs/ufshcd-crypto.c | 240
-rw-r--r--  drivers/scsi/ufs/ufshcd-crypto.h | 71
-rw-r--r--  drivers/scsi/ufs/ufshcd-dwc.c | 148
-rw-r--r--  drivers/scsi/ufs/ufshcd-dwc.h | 23
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 606
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c | 382
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.h | 36
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 9823
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 1428
-rw-r--r--  drivers/scsi/ufs/ufshci-dwc.h | 33
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 507
-rw-r--r--  drivers/scsi/ufs/ufshpb.c | 2653
-rw-r--r--  drivers/scsi/ufs/ufshpb.h | 320
-rw-r--r--  drivers/scsi/ufs/unipro.h | 332
-rw-r--r--  drivers/scsi/virtio_scsi.c | 20
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 1
-rw-r--r--  drivers/scsi/vmw_pvscsi.h | 4
-rw-r--r--  drivers/scsi/wd33c93.c | 179
-rw-r--r--  drivers/scsi/wd33c93.h | 9
-rw-r--r--  drivers/scsi/wd719x.c | 15
-rw-r--r--  drivers/scsi/wd719x.h | 1
-rw-r--r--  drivers/scsi/xen-scsifront.c | 194
-rw-r--r--  drivers/scsi/zalon.c | 1
-rw-r--r--  drivers/scsi/zorro7xx.c | 2
454 files changed, 27686 insertions(+), 49701 deletions(-)
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index cd823ff5deab..6cb9cca9565b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2006,7 +2006,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
retval = pci_enable_device(pdev);
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
- goto out_disable_device;
+ return -ENODEV;
}
pci_set_master(pdev);
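
The hunk above fixes an error-path bug: when pci_enable_device() itself fails there is nothing to unwind yet, so jumping to a label that calls pci_disable_device() would disable a device that was never enabled. A minimal sketch of the resulting pattern, with a hypothetical later failure showing where the unwind label is still correct (names here are illustrative, not the driver's):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval;

	retval = pci_enable_device(pdev);
	if (retval)
		return -ENODEV;		/* nothing to unwind yet */

	pci_set_master(pdev);

	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (retval)
		goto out_disable_device;	/* enable succeeded, so unwind it */

	return 0;

out_disable_device:
	pci_disable_device(pdev);
	return -ENODEV;
}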
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b9482da79512..3ebe66151dcb 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1567,8 +1567,6 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
@@ -1786,8 +1784,6 @@ static int __maybe_unused twl_resume(struct device *dev)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
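
Both hunks above drop the 32-bit retry after a failed 64-bit dma_set_mask_and_coherent(). On current kernels a rejected 64-bit mask means the platform cannot do DMA for this device at all, so the 32-bit retry would fail the same way and was effectively dead code. A hedged sketch of the simplified idiom (helper name is illustrative):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	/* one call is enough; do not retry with a smaller mask */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return -ENODEV;
	return 0;
}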
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index a853c5497af6..ffdecb12d654 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -912,7 +912,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
data_buffer_length_adjusted = (data_buffer_length + 511) & ~511;
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
retval = -ENOMEM;
goto out;
@@ -921,7 +921,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
tw_ioctl = (TW_New_Ioctl *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl) - 1))
+ if (copy_from_user(tw_ioctl, argp, data_buffer_length + sizeof(TW_New_Ioctl)))
goto out2;
passthru = (TW_Passthru *)&tw_ioctl->firmware_command;
@@ -966,15 +966,15 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
/* Load the sg list */
switch (TW_SGL_OUT(tw_ioctl->firmware_command.opcode__sgloffset)) {
case 2:
- tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.param.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl);
tw_ioctl->firmware_command.byte8.param.sgl[0].length = data_buffer_length_adjusted;
break;
case 3:
- tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ tw_ioctl->firmware_command.byte8.io.sgl[0].address = dma_handle + sizeof(TW_New_Ioctl);
tw_ioctl->firmware_command.byte8.io.sgl[0].length = data_buffer_length_adjusted;
break;
case 5:
- passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl) - 1;
+ passthru->sg_list[0].address = dma_handle + sizeof(TW_New_Ioctl);
passthru->sg_list[0].length = data_buffer_length_adjusted;
break;
}
@@ -1017,12 +1017,12 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
}
/* Now copy the response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length - 1))
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_New_Ioctl) + data_buffer_length))
goto out2;
retval = 0;
out2:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_New_Ioctl), cpu_addr, dma_handle);
out:
mutex_unlock(&tw_dev->ioctl_lock);
mutex_unlock(&tw_mutex);
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index e8f3f081b7d8..120a087bdf3c 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -348,7 +348,7 @@ typedef struct TAG_TW_New_Ioctl {
unsigned int data_buffer_length;
unsigned char padding [508];
TW_Command firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_New_Ioctl;
/* GetParam descriptor */
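
This header change is what drives every dropped "- 1" in the 3w-xxxx.c hunks above: data_buffer used to be a one-element placeholder array, so sizeof(TW_New_Ioctl) already counted one buffer byte and every size calculation had to subtract it back out; as a C99 flexible array member the buffer contributes nothing to sizeof and the arithmetic becomes plain header plus payload. A small standalone illustration of the difference (simplified structs, not the driver's real layout):

#include <stdio.h>

struct old_style { int len; char data[1]; };	/* sizeof counts 1 data byte (plus any padding) */
struct new_style { int len; char data[]; };	/* sizeof counts the header only */

int main(void)
{
	size_t payload = 512;

	/* old convention: correct for the placeholder byte by hand */
	printf("old: %zu\n", sizeof(struct old_style) + payload - 1);
	/* new convention: exact by construction */
	printf("new: %zu\n", sizeof(struct new_style) + payload);
	return 0;
}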
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 3ad3ebaca8e9..e1e4f9d10887 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1507,7 +1507,6 @@ NCR_700_intr(int irq, void *dev_id)
struct scsi_cmnd *SCp = hostdata->cmd;
handled = 1;
- SCp = hostdata->cmd;
if(istat & SCSI_INT_PENDING) {
udelay(10);
@@ -1792,8 +1791,6 @@ static int NCR_700_queuecommand_lck(struct scsi_cmnd *SCp)
slot->cmnd = SCp;
SCp->host_scribble = (unsigned char *)slot;
- SCp->SCp.ptr = NULL;
- SCp->SCp.buffer = NULL;
#ifdef NCR_700_DEBUG
printk("53c700: scsi%d, command ", SCp->device->host->host_no);
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index a897c8f914cf..f2abffce2659 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2515,12 +2515,26 @@ static int blogic_resultcode(struct blogic_adapter *adapter,
return (hoststatus << 16) | tgt_status;
}
+/*
+ * turn the dma address from an inbox into a ccb pointer
+ * This is rather inefficient.
+ */
+static struct blogic_ccb *
+blogic_inbox_to_ccb(struct blogic_adapter *adapter, struct blogic_inbox *inbox)
+{
+ struct blogic_ccb *ccb;
+
+ for (ccb = adapter->all_ccbs; ccb; ccb = ccb->next_all)
+ if (inbox->ccb == ccb->dma_handle)
+ break;
+
+ return ccb;
+}
/*
blogic_scan_inbox scans the Incoming Mailboxes saving any
Incoming Mailbox entries for completion processing.
*/
-
static void blogic_scan_inbox(struct blogic_adapter *adapter)
{
/*
@@ -2540,17 +2554,14 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
enum blogic_cmplt_code comp_code;
while ((comp_code = next_inbox->comp_code) != BLOGIC_INBOX_FREE) {
- /*
- We are only allowed to do this because we limit our
- architectures we run on to machines where bus_to_virt(
- actually works. There *needs* to be a dma_addr_to_virt()
- in the new PCI DMA mapping interface to replace
- bus_to_virt() or else this code is going to become very
- innefficient.
- */
- struct blogic_ccb *ccb =
- (struct blogic_ccb *) bus_to_virt(next_inbox->ccb);
- if (comp_code != BLOGIC_CMD_NOTFOUND) {
+ struct blogic_ccb *ccb = blogic_inbox_to_ccb(adapter, next_inbox);
+ if (!ccb) {
+ /*
+ * This should never happen, unless the CCB list is
+ * corrupted in memory.
+ */
+ blogic_warn("Could not find CCB for dma address %x\n", adapter, next_inbox->ccb);
+ } else if (comp_code != BLOGIC_CMD_NOTFOUND) {
if (ccb->status == BLOGIC_CCB_ACTIVE ||
ccb->status == BLOGIC_CCB_RESET) {
/*
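
With bus_to_virt() gone, blogic_scan_inbox() recovers the CCB by walking the adapter's all_ccbs list and comparing the mailbox's bus address against the dma_handle each CCB recorded when it was mapped, which is the O(n) lookup the new helper's own comment concedes is inefficient. A hedged sketch (hypothetical helper; only the field names follow the driver) of the bookkeeping that makes the reverse lookup possible:

/* hypothetical setup helper: record the DMA address the hardware will
 * later hand back through an incoming mailbox */
static void example_link_ccb(struct blogic_adapter *adapter,
			     struct blogic_ccb *ccb, dma_addr_t dma_handle)
{
	ccb->dma_handle = dma_handle;		/* key for blogic_inbox_to_ccb() */
	ccb->next_all = adapter->all_ccbs;	/* push onto the all-CCBs list */
	adapter->all_ccbs = ccb;
}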
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 90253208a72f..3d9c56ac8224 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -1712,7 +1712,7 @@ static unsigned char FlashPoint_InterruptPending(void *pCurrCard)
static int FlashPoint_HandleInterrupt(void *pcard)
{
struct sccb *currSCCB;
- unsigned char thisCard, result, bm_status, bm_int_st;
+ unsigned char thisCard, result, bm_status;
unsigned short hp_int;
unsigned char i, target;
struct sccb_card *pCurrCard = pcard;
@@ -1723,7 +1723,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
MDISABLE_INT(ioport);
- if ((bm_int_st = RD_HARPOON(ioport + hp_int_status)) & EXT_STATUS_ON)
+ if (RD_HARPOON(ioport + hp_int_status) & EXT_STATUS_ON)
bm_status = RD_HARPOON(ioport + hp_ext_status) &
(unsigned char)BAD_EXT_STATUS;
else
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 6e3a04107bb6..03e71e3d5e5b 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -2,9 +2,10 @@
menu "SCSI device support"
config SCSI_MOD
- tristate
- default y if SCSI=n || SCSI=y
- default m if SCSI=m
+ tristate
+ default y if SCSI=n || SCSI=y
+ default m if SCSI=m
+ depends on BLOCK
config RAID_ATTRS
tristate "RAID Transport Class"
@@ -458,17 +459,6 @@ config SCSI_MVUMI
To compile this driver as a module, choose M here: the
module will be called mvumi.
-config SCSI_DPT_I2O
- tristate "Adaptec I2O RAID support "
- depends on SCSI && PCI && VIRT_TO_BUS
- help
- This driver supports all of Adaptec's I2O based RAID controllers as
- well as the DPT SmartRaid V cards. This is an Adaptec maintained
- driver by Deanna Bonds. See <file:Documentation/scsi/dpti.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called dpt_i2o.
-
config SCSI_ADVANSYS
tristate "AdvanSys SCSI support"
depends on SCSI
@@ -500,7 +490,6 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/mpi3mr/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
-source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
@@ -514,7 +503,7 @@ config SCSI_HPTIOP
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
- depends on PCI && SCSI && VIRT_TO_BUS
+ depends on PCI && SCSI
help
This is support for BusLogic MultiMaster and FlashPoint SCSI Host
Adapters. Consult the SCSI-HOWTO, available from
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 19814c26c908..f055bfd54a68 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -63,7 +63,6 @@ obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
-obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
@@ -101,7 +100,6 @@ obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/
-obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
obj-$(CONFIG_SCSI_INITIO) += initio.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 55af3e245a92..dece7d9eb4d3 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -84,8 +84,7 @@
* On command termination, the done function will be called as
* appropriate.
*
- * SCSI pointers are maintained in the SCp field of SCSI command
- * structures, being initialized after the command is connected
+ * The command data pointer is initialized after the command is connected
* in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
* Note that in violation of the standard, an implicit SAVE POINTERS operation
* is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
@@ -145,40 +144,38 @@ static void bus_reset_cleanup(struct Scsi_Host *);
static inline void initialize_SCp(struct scsi_cmnd *cmd)
{
- /*
- * Initialize the Scsi Pointer field so that all of the commands in the
- * various queues are valid.
- */
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ ncmd->buffer = scsi_sglist(cmd);
+ ncmd->ptr = sg_virt(ncmd->buffer);
+ ncmd->this_residual = ncmd->buffer->length;
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.ptr = NULL;
- cmd->SCp.this_residual = 0;
+ ncmd->buffer = NULL;
+ ncmd->ptr = NULL;
+ ncmd->this_residual = 0;
}
- cmd->SCp.Status = 0;
- cmd->SCp.Message = 0;
+ ncmd->status = 0;
+ ncmd->message = 0;
}
-static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
+static inline void advance_sg_buffer(struct NCR5380_cmd *ncmd)
{
- struct scatterlist *s = cmd->SCp.buffer;
+ struct scatterlist *s = ncmd->buffer;
- if (!cmd->SCp.this_residual && s && !sg_is_last(s)) {
- cmd->SCp.buffer = sg_next(s);
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ if (!ncmd->this_residual && s && !sg_is_last(s)) {
+ ncmd->buffer = sg_next(s);
+ ncmd->ptr = sg_virt(ncmd->buffer);
+ ncmd->this_residual = ncmd->buffer->length;
}
}
static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
{
- int resid = cmd->SCp.this_residual;
- struct scatterlist *s = cmd->SCp.buffer;
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
+ int resid = ncmd->this_residual;
+ struct scatterlist *s = ncmd->buffer;
if (s)
while (!sg_is_last(s)) {
@@ -564,7 +561,7 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
struct scsi_cmnd *cmd)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
unsigned long flags;
#if (NDEBUG & NDEBUG_NO_WRITE)
@@ -672,7 +669,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
@@ -757,6 +754,7 @@ static void NCR5380_main(struct work_struct *work)
static void NCR5380_dma_complete(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(hostdata->connected);
int transferred;
unsigned char **data;
int *count;
@@ -764,7 +762,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
unsigned char p;
if (hostdata->read_overruns) {
- p = hostdata->connected->SCp.phase;
+ p = ncmd->phase;
if (p & SR_IO) {
udelay(10);
if ((NCR5380_read(BUS_AND_STATUS_REG) &
@@ -801,8 +799,8 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
hostdata->dma_len = 0;
- data = (unsigned char **)&hostdata->connected->SCp.ptr;
- count = &hostdata->connected->SCp.this_residual;
+ data = (unsigned char **)&ncmd->ptr;
+ count = &ncmd->this_residual;
*data += transferred;
*count -= transferred;
@@ -1498,7 +1496,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
return -1;
}
- hostdata->connected->SCp.phase = p;
+ NCR5380_to_ncmd(hostdata->connected)->phase = p;
if (p & SR_IO) {
if (hostdata->read_overruns)
@@ -1690,7 +1688,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
#endif
while ((cmd = hostdata->connected)) {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(cmd);
tmp = NCR5380_read(STATUS_REG);
/* We only have a valid SCSI phase when REQ is asserted */
@@ -1705,17 +1703,17 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sun3_dma_setup_done != cmd) {
int count;
- advance_sg_buffer(cmd);
+ advance_sg_buffer(ncmd);
count = sun3scsi_dma_xfer_len(hostdata, cmd);
if (count > 0) {
if (cmd->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
- cmd->SCp.ptr, count);
+ ncmd->ptr, count);
else
sun3scsi_dma_recv_setup(hostdata,
- cmd->SCp.ptr, count);
+ ncmd->ptr, count);
sun3_dma_setup_done = cmd;
}
#ifdef SUN3_SCSI_VME
@@ -1755,11 +1753,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* scatter-gather list, move onto the next one.
*/
- advance_sg_buffer(cmd);
+ advance_sg_buffer(ncmd);
dsprintk(NDEBUG_INFORMATION, instance,
"this residual %d, sg ents %d\n",
- cmd->SCp.this_residual,
- sg_nents(cmd->SCp.buffer));
+ ncmd->this_residual,
+ sg_nents(ncmd->buffer));
/*
* The preferred transfer method is going to be
@@ -1778,7 +1776,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
if (transfersize > 0) {
len = transfersize;
if (NCR5380_transfer_dma(instance, &phase,
- &len, (unsigned char **)&cmd->SCp.ptr)) {
+ &len, (unsigned char **)&ncmd->ptr)) {
/*
* If the watchdog timer fires, all future
* accesses to this device will use the
@@ -1794,13 +1792,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* Transfer a small chunk so that the
* irq mode lock is not held too long.
*/
- transfersize = min(cmd->SCp.this_residual,
+ transfersize = min(ncmd->this_residual,
NCR5380_PIO_CHUNK_SIZE);
len = transfersize;
NCR5380_transfer_pio(instance, &phase, &len,
- (unsigned char **)&cmd->SCp.ptr,
+ (unsigned char **)&ncmd->ptr,
0);
- cmd->SCp.this_residual -= transfersize - len;
+ ncmd->this_residual -= transfersize - len;
}
#ifdef CONFIG_SUN3
if (sun3_dma_setup_done == cmd)
@@ -1811,7 +1809,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
len = 1;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
- cmd->SCp.Message = tmp;
+ ncmd->message = tmp;
switch (tmp) {
case ABORT:
@@ -1828,15 +1826,15 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
- set_status_byte(cmd, cmd->SCp.Status);
+ set_status_byte(cmd, ncmd->status);
set_resid_from_SCp(cmd);
if (cmd->cmnd[0] == REQUEST_SENSE)
complete_cmd(instance, cmd);
else {
- if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION ||
- cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) {
+ if (ncmd->status == SAM_STAT_CHECK_CONDITION ||
+ ncmd->status == SAM_STAT_COMMAND_TERMINATED) {
dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n",
cmd);
list_add_tail(&ncmd->list,
@@ -2000,7 +1998,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
len = 1;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data, 0);
- cmd->SCp.Status = tmp;
+ ncmd->status = tmp;
break;
default:
shost_printk(KERN_ERR, instance, "unknown phase\n");
@@ -2153,17 +2151,17 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
if (sun3_dma_setup_done != tmp) {
int count;
- advance_sg_buffer(tmp);
+ advance_sg_buffer(ncmd);
count = sun3scsi_dma_xfer_len(hostdata, tmp);
if (count > 0) {
if (tmp->sc_data_direction == DMA_TO_DEVICE)
sun3scsi_dma_send_setup(hostdata,
- tmp->SCp.ptr, count);
+ ncmd->ptr, count);
else
sun3scsi_dma_recv_setup(hostdata,
- tmp->SCp.ptr, count);
+ ncmd->ptr, count);
sun3_dma_setup_done = tmp;
}
}
@@ -2206,7 +2204,7 @@ static bool list_del_cmd(struct list_head *haystack,
struct scsi_cmnd *needle)
{
if (list_find_cmd(haystack, needle)) {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle);
+ struct NCR5380_cmd *ncmd = NCR5380_to_ncmd(needle);
list_del(&ncmd->list);
return true;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 8a3b41932288..8dc2be4212dc 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -227,11 +227,15 @@ struct NCR5380_hostdata {
};
struct NCR5380_cmd {
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int status;
+ int message;
+ int phase;
struct list_head list;
};
-#define NCR5380_CMD_SIZE (sizeof(struct NCR5380_cmd))
-
#define NCR5380_PIO_CHUNK_SIZE 256
/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
@@ -242,6 +246,11 @@ static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
return ((struct scsi_cmnd *)ncmd_ptr) - 1;
}
+static inline struct NCR5380_cmd *NCR5380_to_ncmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#ifndef NDEBUG
#define NDEBUG (0)
#endif
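
Across the NCR5380 hunks the per-command scratch state moves from the mid-layer's cmd->SCp area into the driver-private struct NCR5380_cmd. The SCSI core allocates that structure directly behind each scsi_cmnd when the host template declares its size, which is why NCR5380_to_scmd() can recover the command with plain pointer arithmetic and NCR5380_to_ncmd() is just scsi_cmd_priv(). A hedged sketch of the mechanism (illustrative template fragment, not a complete board driver):

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_template = {
	.module		= THIS_MODULE,
	/* the core reserves this much private space behind every command */
	.cmd_size	= sizeof(struct NCR5380_cmd),
	/* ... .queuecommand and the rest elided ... */
};

static void example_use(struct scsi_cmnd *cmd)
{
	/* the private area lives immediately after the scsi_cmnd */
	struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);

	ncmd->this_residual = 0;
}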
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 564ade03b530..d02eb5b213d0 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -904,13 +904,11 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
/**
* inia100_queue_lck - queue command with host
* @cmd: Command block
- * @done: Completion function
*
* Called by the mid layer to queue a command. Process the command
* block, build the host specific scb structures and if there is room
* queue the command down to the controller
*/
-
static int inia100_queue_lck(struct scsi_cmnd *cmd)
{
struct orc_scb *scb;
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 5853db36eceb..74312400468b 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -12,7 +12,11 @@
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "a2091.h"
@@ -20,8 +24,11 @@
struct a2091_hostdata {
struct WD33C93_hostdata wh;
struct a2091_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a2091_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -40,16 +47,33 @@ static irqreturn_t a2091_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/* don't allow DMA if the physical address is bad */
if (addr & A2091_XFER_MASK) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
@@ -59,8 +83,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- /* get the physical address of the bounce buffer */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev, wh->dma_bounce_buffer,
+ wh->dma_bounce_len, DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
/* the bounce buffer may not be in the first 16M of physmem */
if (addr & A2091_XFER_MASK) {
@@ -71,11 +108,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
- }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -90,13 +123,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
+
/* start DMA */
regs->ST_DMA = 1;
@@ -107,6 +135,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct a2091_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a2091_scsiregs *regs = hdata->regs;
@@ -136,11 +165,15 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* restore the CONTROL bits (minus the direction flag) */
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
kfree(wh->dma_bounce_buffer);
wh->dma_bounce_buffer = NULL;
wh->dma_bounce_len = 0;
@@ -161,6 +194,7 @@ static struct scsi_host_template a2091_scsi_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
@@ -171,6 +205,11 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wd33c93_regs wdregs;
struct a2091_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&z->dev, DMA_BIT_MASK(24))) {
+ dev_warn(&z->dev, "cannot use 24 bit DMA\n");
+ return -ENODEV;
+ }
+
if (!request_mem_region(z->resource.start, 256, "wd33c93"))
return -EBUSY;
@@ -191,6 +230,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &z->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
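
The a2091 conversion is the standard virt_to_bus-to-streaming-DMA migration: the buffer is mapped with dma_map_single(), the returned dma_addr_t is what gets programmed into the controller (and later released with dma_unmap_single() in dma_stop()), and the DMA API performs the cache maintenance that cache_clear()/cache_push() used to do by hand. A condensed, hedged sketch of the round trip (helper names are illustrative):

#include <linux/dma-mapping.h>

static dma_addr_t example_map(struct device *dev, void *buf, size_t len,
			      enum dma_data_direction dir)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, dir);

	if (dma_mapping_error(dev, addr))
		return 0;	/* sketch only: caller falls back to PIO or a bounce buffer */
	return addr;		/* program this into the DMA engine, not virt_to_bus() */
}

static void example_unmap(struct device *dev, dma_addr_t addr, size_t len,
			  enum dma_data_direction dir)
{
	/* also does any cache flush/invalidate the platform needs */
	dma_unmap_single(dev, addr, len, dir);
}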
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 86f1da22aaa5..2c5cb1a02e86 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -7,13 +7,18 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "a3000.h"
@@ -21,8 +26,11 @@
struct a3000_hostdata {
struct WD33C93_hostdata wh;
struct a3000_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
static irqreturn_t a3000_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -44,37 +52,65 @@ static irqreturn_t a3000_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ dma_addr_t addr;
+
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
/*
* if the physical address has the wrong alignment, or if
* physical address is bad, or if it is a write and at the
* end of a physical memory chunk, then allocate a bounce
* buffer
+ * MSch 20220629 - only wrong alignment tested - bounce
+ * buffer returned by kmalloc is guaranteed to be aligned
*/
if (addr & A3000_XFER_MASK) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ WARN_ONCE(1, "Invalid alignment for DMA!");
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
GFP_KERNEL);
/* can't allocate memory; use PIO */
if (!wh->dma_bounce_buffer) {
wh->dma_bounce_len = 0;
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
return 1;
}
if (!dir_in) {
/* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
}
/* setup dma direction */
@@ -89,13 +125,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
/* start DMA */
mb(); /* make sure setup is completed */
@@ -109,6 +139,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct a3000_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct a3000_scsiregs *regs = hdata->regs;
@@ -145,12 +176,16 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
regs->CNTR = CNTR_PDMD | CNTR_INTEN;
mb(); /* make sure CNTR is updated before next IO */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (SCpnt) {
if (wh->dma_dir && SCpnt)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
kfree(wh->dma_bounce_buffer);
wh->dma_bounce_buffer = NULL;
wh->dma_bounce_len = 0;
@@ -175,6 +210,7 @@ static struct scsi_host_template amiga_a3000_scsi_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
@@ -186,6 +222,11 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wd33c93_regs wdregs;
struct a3000_hostdata *hdata;
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
+ return -ENODEV;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
@@ -209,6 +250,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
+ hdata->dev = &pdev->dev;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 59f6b7b2a70a..4d4cb47b3846 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -271,7 +271,7 @@ MODULE_PARM_DESC(msi, "IRQ handling."
" 0=PIC(default), 1=MSI, 2=MSI-X)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
- " adapter to have it's kernel up and\n"
+ " adapter to have its kernel up and\n"
"running. This is typically adjusted for large systems that do not"
" have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
@@ -338,7 +338,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
aac_fib_complete(fibptr);
return 0;
}
- scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+ aac_priv(scsicmd)->owner = AAC_OWNER_MIDLEVEL;
device = scsicmd->device;
if (unlikely(!device)) {
dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
@@ -592,7 +592,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
aac_fib_init(cmd_fibcontext);
dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_READ_NAME);
@@ -634,14 +634,15 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
{
struct fsa_dev_info *fsa_dev_ptr;
int (*callback)(struct scsi_cmnd *);
- struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+ struct scsi_cmnd *scsicmd = context;
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
int i;
if (!aac_valid_context(scsicmd, fibptr))
return;
- scsicmd->SCp.Status = 0;
+ cmd_priv->status = 0;
fsa_dev_ptr = fibptr->dev->fsa_dev;
if (fsa_dev_ptr) {
struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
@@ -679,12 +680,12 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
}
if ((fsa_dev_ptr->valid & 1) == 0)
fsa_dev_ptr->valid = 0;
- scsicmd->SCp.Status = le32_to_cpu(dresp->count);
+ cmd_priv->status = le32_to_cpu(dresp->count);
}
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
- callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
- scsicmd->SCp.ptr = NULL;
+ callback = cmd_priv->callback;
+ cmd_priv->callback = NULL;
(*callback)(scsicmd);
return;
}
@@ -722,7 +723,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
fibptr,
@@ -743,6 +744,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
{
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
struct fib * fibptr;
int status = -ENOMEM;
@@ -761,8 +763,8 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
dinfo->count = cpu_to_le32(scmd_id(scsicmd));
dinfo->type = cpu_to_le32(FT_FILESYS);
- scsicmd->SCp.ptr = (char *)callback;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ cmd_priv->callback = callback;
+ cmd_priv->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
fibptr,
@@ -778,7 +780,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
return 0;
if (status < 0) {
- scsicmd->SCp.ptr = NULL;
+ cmd_priv->callback = NULL;
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
}
@@ -817,6 +819,7 @@ static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
int aac_probe_container(struct aac_dev *dev, int cid)
{
struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
+ struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
int status;
@@ -835,7 +838,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
while (scsicmd->device == scsidev)
schedule();
kfree(scsidev);
- status = scsicmd->SCp.Status;
+ status = cmd_priv->status;
kfree(scsicmd);
return status;
}
@@ -1047,7 +1050,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
vpdpage83data.type1.productid));
/* Convert to ascii based serial number.
- * The LSB is the the end.
+ * The LSB is the end.
*/
for (i = 0; i < 8; i++) {
u8 temp =
@@ -1128,7 +1131,7 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
dinfo->command = cpu_to_le32(VM_ContainerConfig);
dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_fib_send(ContainerCommand,
cmd_fibcontext,
@@ -2486,7 +2489,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
* Alocate and initialize a Fib
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
/*
@@ -2577,7 +2580,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
* Allocate and initialize a Fib then setup a BlockWrite command
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
/*
@@ -2660,7 +2663,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
synchronizecmd->count =
cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
/*
* Now send the Fib to the adapter
@@ -2736,7 +2739,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
pmcmd->cid = cpu_to_le32(sdev_id(sdev));
pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
/*
* Now send the Fib to the adapter
@@ -3695,7 +3698,7 @@ out:
aac_fib_complete(fibptr);
if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
- scsicmd->SCp.sent_command = 1;
+ aac_priv(scsicmd)->sent_command = 1;
else
aac_scsi_done(scsicmd);
}
@@ -3725,7 +3728,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
* Allocate and initialize a Fib then setup a BlockWrite command
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
/*
@@ -3769,7 +3772,7 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
if (!cmd_fibcontext)
return -1;
- scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ aac_priv(scsicmd)->owner = AAC_OWNER_FIRMWARE;
status = aac_adapter_hba(cmd_fibcontext, scsicmd);
/*
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3733df77bc65..5e115e8b2ba4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -29,6 +29,7 @@
#include <linux/completion.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -120,7 +121,7 @@ enum {
#define SA_AIF_PDEV_CHANGE (1<<4)
#define SA_AIF_LDEV_CHANGE (1<<5)
#define SA_AIF_BPSTAT_CHANGE (1<<30)
-#define SA_AIF_BPCFG_CHANGE (1<<31)
+#define SA_AIF_BPCFG_CHANGE (1U<<31)
#define HBA_MAX_SG_EMBEDDED 28
#define HBA_MAX_SG_SEPARATE 90
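
The 1U added to SA_AIF_BPCFG_CHANGE is not cosmetic: shifting a signed 1 into bit 31 of an int is undefined behavior in C. An illustration (not driver code):

        #define BAD_BIT31  (1 << 31)    /* signed int shifted into the sign bit: UB */
        #define GOOD_BIT31 (1U << 31)   /* unsigned: well-defined, value 0x80000000 */
        /* Kernel code can also use BIT(31) from <linux/bits.h>, i.e. (1UL << 31). */
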
@@ -2673,11 +2674,24 @@ static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
}
-/* SCp.phase values */
-#define AAC_OWNER_MIDLEVEL 0x101
-#define AAC_OWNER_LOWLEVEL 0x102
-#define AAC_OWNER_ERROR_HANDLER 0x103
-#define AAC_OWNER_FIRMWARE 0x106
+enum aac_cmd_owner {
+ AAC_OWNER_MIDLEVEL = 0x101,
+ AAC_OWNER_LOWLEVEL = 0x102,
+ AAC_OWNER_ERROR_HANDLER = 0x103,
+ AAC_OWNER_FIRMWARE = 0x106,
+};
+
+struct aac_cmd_priv {
+ int (*callback)(struct scsi_cmnd *);
+ int status;
+ enum aac_cmd_owner owner;
+ bool sent_command;
+};
+
+static inline struct aac_cmd_priv *aac_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
void aac_safw_rescan_worker(struct work_struct *work);
void aac_src_reinit_aif_worker(struct work_struct *work);
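
The hunk above is the heart of the conversion: instead of the fields of scsi_cmnd::SCp, the driver declares its own per-command struct, announces its size via the host template's cmd_size, and reaches it through scsi_cmd_priv(), which returns the memory the midlayer allocates immediately after each scsi_cmnd (aac_priv() is just a typed wrapper around it). A minimal sketch of the pattern with a hypothetical driver name:

        #include <scsi/scsi_cmnd.h>
        #include <scsi/scsi_host.h>

        struct foo_cmd_priv {
                int owner;              /* whatever per-command state the LLD needs */
        };

        static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
        {
                /* scsi_cmd_priv(cmd) points just past *cmd, sized by .cmd_size */
                struct foo_cmd_priv *priv = scsi_cmd_priv(cmd);

                priv->owner = 1;
                return 0;
        }

        static struct scsi_host_template foo_template = {
                .queuecommand   = foo_queuecommand,
                .cmd_size       = sizeof(struct foo_cmd_priv),
        };
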
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 355b16f0b145..bd99c5492b7d 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -272,11 +272,11 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
-static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data, bool rsvd)
+static bool wait_for_io_iter(struct scsi_cmnd *cmd, void *data)
{
int *active = data;
- if (cmd->SCp.phase == AAC_OWNER_FIRMWARE)
+ if (aac_priv(cmd)->owner == AAC_OWNER_FIRMWARE)
*active = *active + 1;
return true;
}
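
wait_for_io_iter() loses its trailing bool because scsi_host_busy_iter() callbacks no longer receive a "reserved command" flag. A sketch of the post-change calling convention, counting in-flight commands on a hypothetical host:

        #include <scsi/scsi_cmnd.h>
        #include <scsi/scsi_host.h>

        static bool count_iter(struct scsi_cmnd *cmd, void *data)
        {
                int *count = data;

                (*count)++;
                return true;    /* returning false would stop the iteration early */
        }

        static int count_busy_commands(struct Scsi_Host *shost)
        {
                int count = 0;

                scsi_host_busy_iter(shost, count_iter, &count);
                return count;
        }
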
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a911252075a6..5ba5c18b77b4 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -241,10 +241,9 @@ static struct aac_driver_ident aac_drivers[] = {
static int aac_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *cmd)
{
- int r = 0;
- cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
- r = (aac_scsi_cmd(cmd) ? FAILED : 0);
- return r;
+ aac_priv(cmd)->owner = AAC_OWNER_LOWLEVEL;
+
+ return aac_scsi_cmd(cmd) ? FAILED : 0;
}
/**
@@ -634,11 +633,11 @@ struct fib_count_data {
int krlcnt;
};
-static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data, bool reserved)
+static bool fib_count_iter(struct scsi_cmnd *scmnd, void *data)
{
struct fib_count_data *fib_count = data;
- switch (scmnd->SCp.phase) {
+ switch (aac_priv(scmnd)->owner) {
case AAC_OWNER_FIRMWARE:
fib_count->fwcnt++;
break;
@@ -680,6 +679,7 @@ static int get_num_of_incomplete_fibs(struct aac_dev *aac)
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
+ struct aac_cmd_priv *cmd_priv = aac_priv(cmd);
struct scsi_device * dev = cmd->device;
struct Scsi_Host * host = dev->host;
struct aac_dev * aac = (struct aac_dev *)host->hostdata;
@@ -732,7 +732,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
fib->hbacmd_size = sizeof(*tmf);
- cmd->SCp.sent_command = 0;
+ cmd_priv->sent_command = 0;
status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
(fib_callback) aac_hba_callback,
@@ -744,7 +744,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
}
/* Wait up to 15 secs for completion */
for (count = 0; count < 15; ++count) {
- if (cmd->SCp.sent_command) {
+ if (cmd_priv->sent_command) {
ret = SUCCESS;
break;
}
@@ -784,7 +784,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib->callback_data == cmd)) {
fib->flags |=
FIB_CONTEXT_FLAG_TIMED_OUT;
- cmd->SCp.phase =
+ cmd_priv->owner =
AAC_OWNER_ERROR_HANDLER;
ret = SUCCESS;
}
@@ -811,7 +811,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(command->device == cmd->device)) {
fib->flags |=
FIB_CONTEXT_FLAG_TIMED_OUT;
- command->SCp.phase =
+ aac_priv(command)->owner =
AAC_OWNER_ERROR_HANDLER;
if (command == cmd)
ret = SUCCESS;
@@ -864,7 +864,7 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
fib->hbacmd_size = sizeof(*rst);
- return HBA_IU_TYPE_SATA_REQ;
+ return HBA_IU_TYPE_SATA_REQ;
}
static void aac_tmf_callback(void *context, struct fib *fibptr)
@@ -1058,7 +1058,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
- cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+ aac_priv(cmd)->owner = AAC_OWNER_ERROR_HANDLER;
}
}
}
@@ -1507,6 +1507,7 @@ static struct scsi_host_template aac_driver_template = {
#endif
.emulated = 1,
.no_write_same = 1,
+ .cmd_size = sizeof(struct aac_cmd_priv),
};
static void __aac_shutdown(struct aac_dev * aac)
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ace5eff828e9..f301aec044bb 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2277,6 +2277,15 @@ struct asc_board {
dvc_var.adv_dvc_var)
#define adv_dvc_to_pdev(adv_dvc) to_pci_dev(adv_dvc_to_board(adv_dvc)->dev)
+struct advansys_cmd {
+ dma_addr_t dma_handle;
+};
+
+static struct advansys_cmd *advansys_cmd(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#ifdef ADVANSYS_DEBUG
static int asc_dbglvl = 3;
@@ -6681,7 +6690,7 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
ASC_STATS(boardp->shost, callback);
- dma_unmap_single(boardp->dev, scp->SCp.dma_handle,
+ dma_unmap_single(boardp->dev, advansys_cmd(scp)->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
/*
* 'qdonep' contains the command's ending status.
@@ -7399,15 +7408,15 @@ static int advansys_slave_configure(struct scsi_device *sdev)
static __le32 asc_get_sense_buffer_dma(struct scsi_cmnd *scp)
{
struct asc_board *board = shost_priv(scp->device->host);
+ struct advansys_cmd *acmd = advansys_cmd(scp);
- scp->SCp.dma_handle = dma_map_single(board->dev, scp->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(board->dev, scp->SCp.dma_handle)) {
+ acmd->dma_handle = dma_map_single(board->dev, scp->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(board->dev, acmd->dma_handle)) {
ASC_DBG(1, "failed to map sense buffer\n");
return 0;
}
- return cpu_to_le32(scp->SCp.dma_handle);
+ return cpu_to_le32(acmd->dma_handle);
}
static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
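
asc_get_sense_buffer_dma() shows the standard map/check pattern: every dma_map_single() must be followed by dma_mapping_error() before the handle is used, with a matching dma_unmap_single() on completion (as in asc_isr_callback() above). A compact sketch with hypothetical names:

        #include <linux/dma-mapping.h>

        static dma_addr_t map_sense_buffer(struct device *dev, void *sense, size_t len)
        {
                dma_addr_t handle = dma_map_single(dev, sense, len, DMA_FROM_DEVICE);

                if (dma_mapping_error(dev, handle))
                        return 0;       /* the driver above also uses 0 as "not mapped" */
                return handle;
        }

        static void unmap_sense_buffer(struct device *dev, dma_addr_t handle, size_t len)
        {
                dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
        }
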
@@ -10604,6 +10613,7 @@ static struct scsi_host_template advansys_template = {
.eh_host_reset_handler = advansys_reset,
.bios_param = advansys_biosparam,
.slave_configure = advansys_slave_configure,
+ .cmd_size = sizeof(struct advansys_cmd),
};
static int advansys_wide_init_chip(struct Scsi_Host *shost)
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index d17880b57d17..caeebfb67149 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -243,13 +243,16 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <scsi/scsicam.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_spi.h>
-#include <scsi/scsi_eh.h>
+#include <scsi/scsicam.h>
#include "aha152x.h"
static LIST_HEAD(aha152x_host_list);
@@ -313,6 +316,21 @@ enum {
check_condition = 0x0800, /* requesting sense after CHECK CONDITION */
};
+struct aha152x_cmd_priv {
+ char *ptr;
+ int this_residual;
+ struct scatterlist *buffer;
+ int status;
+ int message;
+ int sent_command;
+ int phase;
+};
+
+static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
MODULE_AUTHOR("Jürgen Fischer");
MODULE_DESCRIPTION(AHA152X_REVID);
MODULE_LICENSE("GPL");
@@ -876,14 +894,16 @@ void aha152x_release(struct Scsi_Host *shpnt)
static int setup_expected_interrupts(struct Scsi_Host *shpnt)
{
if(CURRENT_SC) {
- CURRENT_SC->SCp.phase |= 1 << 16;
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
- if(CURRENT_SC->SCp.phase & selecting) {
+ acp->phase |= 1 << 16;
+
+ if (acp->phase & selecting) {
SETPORT(SSTAT1, SELTO);
SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
SETPORT(SIMODE1, ENSELTIMO);
} else {
- SETPORT(SIMODE0, (CURRENT_SC->SCp.phase & spiordy) ? ENSPIORDY : 0);
+ SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0);
SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
}
} else if(STATE==seldi) {
@@ -907,16 +927,16 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
struct completion *complete, int phase)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt);
struct Scsi_Host *shpnt = SCpnt->device->host;
unsigned long flags;
- SCpnt->SCp.phase = not_issued | phase;
- SCpnt->SCp.Status = 0x1; /* Ilegal status by SCSI standard */
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.have_data_in = 0;
- SCpnt->SCp.sent_command = 0;
+ acp->phase = not_issued | phase;
+ acp->status = 0x1; /* Illegal status by SCSI standard */
+ acp->message = 0;
+ acp->sent_command = 0;
- if(SCpnt->SCp.phase & (resetting|check_condition)) {
+ if (acp->phase & (resetting | check_condition)) {
if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
return FAILED;
@@ -939,15 +959,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
SCp.phase : current state of the command */
if ((phase & resetting) || !scsi_sglist(SCpnt)) {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
+ acp->ptr = NULL;
+ acp->this_residual = 0;
scsi_set_resid(SCpnt, 0);
- SCpnt->SCp.buffer = NULL;
+ acp->buffer = NULL;
} else {
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.ptr = SG_ADDRESS(SCpnt->SCp.buffer);
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ acp->buffer = scsi_sglist(SCpnt);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
DO_LOCK(flags);
@@ -997,7 +1017,7 @@ static void reset_done(struct scsi_cmnd *SCpnt)
static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
{
- if (SCpnt->SCp.phase & resetting)
+ if (aha152x_priv(SCpnt)->phase & resetting)
reset_done(SCpnt);
else
scsi_done(SCpnt);
@@ -1083,7 +1103,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
DO_LOCK(flags);
- if(SCpnt->SCp.phase & resetted) {
+ if (aha152x_priv(SCpnt)->phase & resetted) {
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0);
@@ -1377,28 +1397,30 @@ static void busfree_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRBUSFREE);
if(CURRENT_SC) {
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
#if defined(AHA152X_STAT)
action++;
#endif
- CURRENT_SC->SCp.phase &= ~syncneg;
+ acp->phase &= ~syncneg;
- if(CURRENT_SC->SCp.phase & completed) {
+ if (acp->phase & completed) {
/* target sent COMMAND COMPLETE */
- done(shpnt, CURRENT_SC->SCp.Status, DID_OK);
+ done(shpnt, acp->status, DID_OK);
- } else if(CURRENT_SC->SCp.phase & aborted) {
- done(shpnt, CURRENT_SC->SCp.Status, DID_ABORT);
+ } else if (acp->phase & aborted) {
+ done(shpnt, acp->status, DID_ABORT);
- } else if(CURRENT_SC->SCp.phase & resetted) {
- done(shpnt, CURRENT_SC->SCp.Status, DID_RESET);
+ } else if (acp->phase & resetted) {
+ done(shpnt, acp->status, DID_RESET);
- } else if(CURRENT_SC->SCp.phase & disconnected) {
+ } else if (acp->phase & disconnected) {
/* target sent DISCONNECT */
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->disconnections++;
#endif
append_SC(&DISCONNECTED_SC, CURRENT_SC);
- CURRENT_SC->SCp.phase |= 1 << 16;
+ acp->phase |= 1 << 16;
CURRENT_SC = NULL;
} else {
@@ -1417,23 +1439,23 @@ static void busfree_run(struct Scsi_Host *shpnt)
action++;
#endif
- if(DONE_SC->SCp.phase & check_condition) {
+ if (aha152x_priv(DONE_SC)->phase & check_condition) {
struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
struct aha152x_scdata *sc = SCDATA(cmd);
scsi_eh_restore_cmnd(cmd, &sc->ses);
- cmd->SCp.Status = SAM_STAT_CHECK_CONDITION;
+ aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION;
HOSTDATA(shpnt)->commands--;
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- } else if(DONE_SC->SCp.Status==SAM_STAT_CHECK_CONDITION) {
+ } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) {
#if defined(AHA152X_STAT)
HOSTDATA(shpnt)->busfree_with_check_condition++;
#endif
- if(!(DONE_SC->SCp.phase & not_issued)) {
+ if (!(aha152x_priv(DONE_SC)->phase & not_issued)) {
struct aha152x_scdata *sc;
struct scsi_cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@@ -1458,7 +1480,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
if (!HOSTDATA(shpnt)->commands)
SETPORT(PORTA, 0); /* turn led off */
- if (!(ptr->SCp.phase & resetting)) {
+ if (!(aha152x_priv(ptr)->phase & resetting)) {
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;
}
@@ -1481,10 +1503,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
DO_UNLOCK(flags);
if(CURRENT_SC) {
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
#if defined(AHA152X_STAT)
action++;
#endif
- CURRENT_SC->SCp.phase |= selecting;
+ acp->phase |= selecting;
/* clear selection timeout */
SETPORT(SSTAT1, SELTO);
@@ -1512,11 +1536,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
*/
static void seldo_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
SETPORT(SCSISIG, 0);
SETPORT(SSTAT1, CLRBUSFREE);
SETPORT(SSTAT1, CLRPHASECHG);
- CURRENT_SC->SCp.phase &= ~(selecting|not_issued);
+ acp->phase &= ~(selecting | not_issued);
SETPORT(SCSISEQ, 0);
@@ -1531,12 +1557,12 @@ static void seldo_run(struct Scsi_Host *shpnt)
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
- if (CURRENT_SC->SCp.phase & aborting) {
+ if (acp->phase & aborting) {
ADDMSGO(ABORT);
- } else if (CURRENT_SC->SCp.phase & resetting) {
+ } else if (acp->phase & resetting) {
ADDMSGO(BUS_DEVICE_RESET);
} else if (SYNCNEG==0 && SYNCHRONOUS) {
- CURRENT_SC->SCp.phase |= syncneg;
+ acp->phase |= syncneg;
MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
SYNCNEG=1; /* negotiation in progress */
}
@@ -1551,15 +1577,18 @@ static void seldo_run(struct Scsi_Host *shpnt)
*/
static void selto_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp;
+
SETPORT(SCSISEQ, 0);
SETPORT(SSTAT1, CLRSELTIMO);
if (!CURRENT_SC)
return;
- CURRENT_SC->SCp.phase &= ~selecting;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->phase &= ~selecting;
- if (CURRENT_SC->SCp.phase & aborted)
+ if (acp->phase & aborted)
done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
@@ -1587,7 +1616,9 @@ static void seldi_run(struct Scsi_Host *shpnt)
SETPORT(SSTAT1, CLRPHASECHG);
if(CURRENT_SC) {
- if(!(CURRENT_SC->SCp.phase & not_issued))
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
+ if (!(acp->phase & not_issued))
scmd_printk(KERN_ERR, CURRENT_SC,
"command should not have been issued yet\n");
@@ -1644,6 +1675,7 @@ static void seldi_run(struct Scsi_Host *shpnt)
static void msgi_run(struct Scsi_Host *shpnt)
{
for(;;) {
+ struct aha152x_cmd_priv *acp;
int sstat1 = GETPORT(SSTAT1);
if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
@@ -1681,8 +1713,9 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- CURRENT_SC->SCp.Message = MSGI(0);
- CURRENT_SC->SCp.phase &= ~disconnected;
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
+ acp->phase &= ~disconnected;
MSGILEN=0;
@@ -1690,7 +1723,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
continue;
}
- CURRENT_SC->SCp.Message = MSGI(0);
+ acp = aha152x_priv(CURRENT_SC);
+ acp->message = MSGI(0);
switch (MSGI(0)) {
case DISCONNECT:
@@ -1698,11 +1732,11 @@ static void msgi_run(struct Scsi_Host *shpnt)
scmd_printk(KERN_WARNING, CURRENT_SC,
"target was not allowed to disconnect\n");
- CURRENT_SC->SCp.phase |= disconnected;
+ acp->phase |= disconnected;
break;
case COMMAND_COMPLETE:
- CURRENT_SC->SCp.phase |= completed;
+ acp->phase |= completed;
break;
case MESSAGE_REJECT:
@@ -1833,7 +1867,8 @@ static void msgi_end(struct Scsi_Host *shpnt)
static void msgo_init(struct Scsi_Host *shpnt)
{
if(MSGOLEN==0) {
- if((CURRENT_SC->SCp.phase & syncneg) && SYNCNEG==2 && SYNCRATE==0) {
+ if ((aha152x_priv(CURRENT_SC)->phase & syncneg) &&
+ SYNCNEG == 2 && SYNCRATE == 0) {
ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
} else {
scmd_printk(KERN_INFO, CURRENT_SC,
@@ -1850,6 +1885,8 @@ static void msgo_init(struct Scsi_Host *shpnt)
*/
static void msgo_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
while(MSGO_I<MSGOLEN) {
if (TESTLO(SSTAT0, SPIORDY))
return;
@@ -1861,13 +1898,13 @@ static void msgo_run(struct Scsi_Host *shpnt)
if (MSGO(MSGO_I) & IDENTIFY_BASE)
- CURRENT_SC->SCp.phase |= identified;
+ acp->phase |= identified;
if (MSGO(MSGO_I)==ABORT)
- CURRENT_SC->SCp.phase |= aborted;
+ acp->phase |= aborted;
if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
- CURRENT_SC->SCp.phase |= resetted;
+ acp->phase |= resetted;
SETPORT(SCSIDAT, MSGO(MSGO_I++));
}
@@ -1896,7 +1933,7 @@ static void msgo_end(struct Scsi_Host *shpnt)
*/
static void cmd_init(struct Scsi_Host *shpnt)
{
- if (CURRENT_SC->SCp.sent_command) {
+ if (aha152x_priv(CURRENT_SC)->sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
done(shpnt, SAM_STAT_GOOD, DID_ERROR);
@@ -1927,7 +1964,7 @@ static void cmd_end(struct Scsi_Host *shpnt)
"command sent incompletely (%d/%d)\n",
CMD_I, CURRENT_SC->cmd_len);
else
- CURRENT_SC->SCp.sent_command++;
+ aha152x_priv(CURRENT_SC)->sent_command++;
}
/*
@@ -1939,7 +1976,7 @@ static void status_run(struct Scsi_Host *shpnt)
if (TESTLO(SSTAT0, SPIORDY))
return;
- CURRENT_SC->SCp.Status = GETPORT(SCSIDAT);
+ aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT);
}
@@ -1963,6 +2000,7 @@ static void datai_init(struct Scsi_Host *shpnt)
static void datai_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp;
unsigned long the_time;
int fifodata, data_count;
@@ -2000,35 +2038,35 @@ static void datai_run(struct Scsi_Host *shpnt)
fifodata = GETPORT(FIFOSTAT);
}
- if(CURRENT_SC->SCp.this_residual>0) {
- while(fifodata>0 && CURRENT_SC->SCp.this_residual>0) {
- data_count = fifodata > CURRENT_SC->SCp.this_residual ?
- CURRENT_SC->SCp.this_residual :
- fifodata;
+ acp = aha152x_priv(CURRENT_SC);
+ if (acp->this_residual > 0) {
+ while (fifodata > 0 && acp->this_residual > 0) {
+ data_count = fifodata > acp->this_residual ?
+ acp->this_residual : fifodata;
fifodata -= data_count;
if (data_count & 1) {
SETPORT(DMACNTRL0, ENDMA|_8BIT);
- *CURRENT_SC->SCp.ptr++ = GETPORT(DATAPORT);
- CURRENT_SC->SCp.this_residual--;
+ *acp->ptr++ = GETPORT(DATAPORT);
+ acp->this_residual--;
DATA_LEN++;
SETPORT(DMACNTRL0, ENDMA);
}
if (data_count > 1) {
data_count >>= 1;
- insw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
- CURRENT_SC->SCp.ptr += 2 * data_count;
- CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ insw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
DATA_LEN += 2 * data_count;
}
- if (CURRENT_SC->SCp.this_residual == 0 &&
- !sg_is_last(CURRENT_SC->SCp.buffer)) {
+ if (acp->this_residual == 0 &&
+ !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
}
} else if (fifodata > 0) {
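
datai_run() and datao_run() share the same bookkeeping: drain this_residual bytes from ptr, then hop to the next scatterlist segment. Factored out as a sketch (SG_ADDRESS() is an aha152x macro; the generic sg_virt() is assumed here instead):

        #include <linux/scatterlist.h>

        struct sg_cursor {
                struct scatterlist *sg;
                char *ptr;
                int residual;           /* bytes left in the current segment */
        };

        static void sg_cursor_advance(struct sg_cursor *c)
        {
                if (c->residual == 0 && !sg_is_last(c->sg)) {
                        c->sg = sg_next(c->sg);
                        c->ptr = sg_virt(c->sg);
                        c->residual = c->sg->length;
                }
        }
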
@@ -2096,14 +2134,15 @@ static void datao_init(struct Scsi_Host *shpnt)
static void datao_run(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
unsigned long the_time;
int data_count;
/* until phase changes or all data sent */
- while(TESTLO(DMASTAT, INTSTAT) && CURRENT_SC->SCp.this_residual>0) {
+ while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) {
data_count = 128;
- if(data_count > CURRENT_SC->SCp.this_residual)
- data_count=CURRENT_SC->SCp.this_residual;
+ if (data_count > acp->this_residual)
+ data_count = acp->this_residual;
if(TESTLO(DMASTAT, DFIFOEMP)) {
scmd_printk(KERN_ERR, CURRENT_SC,
@@ -2114,26 +2153,25 @@ static void datao_run(struct Scsi_Host *shpnt)
if(data_count & 1) {
SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
- SETPORT(DATAPORT, *CURRENT_SC->SCp.ptr++);
- CURRENT_SC->SCp.this_residual--;
+ SETPORT(DATAPORT, *acp->ptr++);
+ acp->this_residual--;
CMD_INC_RESID(CURRENT_SC, -1);
SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
}
if(data_count > 1) {
data_count >>= 1;
- outsw(DATAPORT, CURRENT_SC->SCp.ptr, data_count);
- CURRENT_SC->SCp.ptr += 2 * data_count;
- CURRENT_SC->SCp.this_residual -= 2 * data_count;
+ outsw(DATAPORT, acp->ptr, data_count);
+ acp->ptr += 2 * data_count;
+ acp->this_residual -= 2 * data_count;
CMD_INC_RESID(CURRENT_SC, -2 * data_count);
}
- if (CURRENT_SC->SCp.this_residual == 0 &&
- !sg_is_last(CURRENT_SC->SCp.buffer)) {
+ if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) {
/* advance to next buffer */
- CURRENT_SC->SCp.buffer = sg_next(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer);
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length;
+ acp->buffer = sg_next(acp->buffer);
+ acp->ptr = SG_ADDRESS(acp->buffer);
+ acp->this_residual = acp->buffer->length;
}
the_time=jiffies + 100*HZ;
@@ -2149,6 +2187,8 @@ static void datao_run(struct Scsi_Host *shpnt)
static void datao_end(struct Scsi_Host *shpnt)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
+
if(TESTLO(DMASTAT, DFIFOEMP)) {
u32 datao_cnt = GETSTCNT();
int datao_out = DATA_LEN - scsi_get_resid(CURRENT_SC);
@@ -2166,10 +2206,9 @@ static void datao_end(struct Scsi_Host *shpnt)
sg = sg_next(sg);
}
- CURRENT_SC->SCp.buffer = sg;
- CURRENT_SC->SCp.ptr = SG_ADDRESS(CURRENT_SC->SCp.buffer) + done;
- CURRENT_SC->SCp.this_residual = CURRENT_SC->SCp.buffer->length -
- done;
+ acp->buffer = sg;
+ acp->ptr = SG_ADDRESS(acp->buffer) + done;
+ acp->this_residual = acp->buffer->length - done;
}
SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2197,7 +2236,8 @@ static int update_state(struct Scsi_Host *shpnt)
SETPORT(SSTAT1,SCSIRSTI);
} else if (stat0 & SELDI && PREVSTATE == busfree) {
STATE=seldi;
- } else if(stat0 & SELDO && CURRENT_SC && (CURRENT_SC->SCp.phase & selecting)) {
+ } else if (stat0 & SELDO && CURRENT_SC &&
+ (aha152x_priv(CURRENT_SC)->phase & selecting)) {
STATE=seldo;
} else if(stat1 & SELTO) {
STATE=selto;
@@ -2329,7 +2369,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(SXFRCTL0, CH1);
SETPORT(DMACNTRL0, 0);
if(CURRENT_SC)
- CURRENT_SC->SCp.phase &= ~spiordy;
+ aha152x_priv(CURRENT_SC)->phase &= ~spiordy;
}
/*
@@ -2351,7 +2391,7 @@ static void is_complete(struct Scsi_Host *shpnt)
SETPORT(DMACNTRL0, 0);
SETPORT(SXFRCTL0, CH1|SPIOEN);
if(CURRENT_SC)
- CURRENT_SC->SCp.phase |= spiordy;
+ aha152x_priv(CURRENT_SC)->phase |= spiordy;
}
/*
@@ -2441,21 +2481,23 @@ static void disp_enintr(struct Scsi_Host *shpnt)
*/
static void show_command(struct scsi_cmnd *ptr)
{
+ const int phase = aha152x_priv(ptr)->phase;
+
scsi_print_command(ptr);
scmd_printk(KERN_DEBUG, ptr,
"request_bufflen=%d; resid=%d; "
"phase |%s%s%s%s%s%s%s%s%s; next=0x%p",
scsi_bufflen(ptr), scsi_get_resid(ptr),
- (ptr->SCp.phase & not_issued) ? "not issued|" : "",
- (ptr->SCp.phase & selecting) ? "selecting|" : "",
- (ptr->SCp.phase & identified) ? "identified|" : "",
- (ptr->SCp.phase & disconnected) ? "disconnected|" : "",
- (ptr->SCp.phase & completed) ? "completed|" : "",
- (ptr->SCp.phase & spiordy) ? "spiordy|" : "",
- (ptr->SCp.phase & syncneg) ? "syncneg|" : "",
- (ptr->SCp.phase & aborted) ? "aborted|" : "",
- (ptr->SCp.phase & resetted) ? "resetted|" : "",
- (SCDATA(ptr)) ? SCNEXT(ptr) : NULL);
+ phase & not_issued ? "not issued|" : "",
+ phase & selecting ? "selecting|" : "",
+ phase & identified ? "identified|" : "",
+ phase & disconnected ? "disconnected|" : "",
+ phase & completed ? "completed|" : "",
+ phase & spiordy ? "spiordy|" : "",
+ phase & syncneg ? "syncneg|" : "",
+ phase & aborted ? "aborted|" : "",
+ phase & resetted ? "resetted|" : "",
+ SCDATA(ptr) ? SCNEXT(ptr) : NULL);
}
/*
@@ -2487,6 +2529,8 @@ static void show_queues(struct Scsi_Host *shpnt)
static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
{
+ struct aha152x_cmd_priv *acp = aha152x_priv(ptr);
+ const int phase = acp->phase;
int i;
seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
@@ -2496,24 +2540,24 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
seq_printf(m, "0x%02x ", ptr->cmnd[i]);
seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
- scsi_get_resid(ptr), ptr->SCp.this_residual,
- sg_nents(ptr->SCp.buffer) - 1);
+ scsi_get_resid(ptr), acp->this_residual,
+ sg_nents(acp->buffer) - 1);
- if (ptr->SCp.phase & not_issued)
+ if (phase & not_issued)
seq_puts(m, "not issued|");
- if (ptr->SCp.phase & selecting)
+ if (phase & selecting)
seq_puts(m, "selecting|");
- if (ptr->SCp.phase & disconnected)
+ if (phase & disconnected)
seq_puts(m, "disconnected|");
- if (ptr->SCp.phase & aborted)
+ if (phase & aborted)
seq_puts(m, "aborted|");
- if (ptr->SCp.phase & identified)
+ if (phase & identified)
seq_puts(m, "identified|");
- if (ptr->SCp.phase & completed)
+ if (phase & completed)
seq_puts(m, "completed|");
- if (ptr->SCp.phase & spiordy)
+ if (phase & spiordy)
seq_puts(m, "spiordy|");
- if (ptr->SCp.phase & syncneg)
+ if (phase & syncneg)
seq_puts(m, "syncneg|");
seq_printf(m, "; next=0x%p\n", SCNEXT(ptr));
}
@@ -2918,6 +2962,7 @@ static struct scsi_host_template aha152x_driver_template = {
.sg_tablesize = SG_ALL,
.dma_boundary = PAGE_SIZE - 1,
.slave_alloc = aha152x_adjust_queue,
+ .cmd_size = sizeof(struct aha152x_cmd_priv),
};
#if !defined(AHA152X_PCMCIA)
@@ -3375,13 +3420,11 @@ static int __init aha152x_setup(char *str)
setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1;
setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT;
setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0;
- if (ints[0] > 8) { /*}*/
+ if (ints[0] > 8)
printk(KERN_NOTICE "aha152x: usage: aha152x=<IOBASE>[,<IRQ>[,<SCSI ID>"
"[,<RECONNECT>[,<PARITY>[,<SYNCHRONOUS>[,<DELAY>[,<EXT_TRANS>]]]]]]]\n");
- } else {
+ else
setup_count++;
- return 0;
- }
return 1;
}
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index f0e8ae9f5e40..552ca95157da 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -206,7 +206,6 @@ static int makecode(unsigned hosterr, unsigned scsierr)
static int aha1542_test_port(struct Scsi_Host *sh)
{
- u8 inquiry_result[4];
int i;
/* Quick and dirty test for presence of the card. */
@@ -240,7 +239,7 @@ static int aha1542_test_port(struct Scsi_Host *sh)
for (i = 0; i < 4; i++) {
if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0))
return 0;
- inquiry_result[i] = inb(DATA(sh->io_port));
+ (void)inb(DATA(sh->io_port));
}
/* Reading port should reset DF */
@@ -303,7 +302,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
if (flag & SCRD)
printk("SCRD ");
printk("status %02x\n", inb(STATUS(sh->io_port)));
- };
+ }
#endif
number_serviced = 0;
@@ -345,7 +344,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
if (!number_serviced)
shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n");
return IRQ_HANDLED;
- };
+ }
mbo = (scsi2int(mb[mbi].ccbptr) - (unsigned long)aha1542->ccb_handle) / sizeof(struct ccb);
mbistatus = mb[mbi].status;
@@ -409,7 +408,7 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
*/
scsi_done(tmp_cmd);
number_serviced++;
- };
+ }
}
static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
@@ -535,7 +534,7 @@ static void setup_mailboxes(struct Scsi_Host *sh)
any2scsi(aha1542->mb[i].ccbptr,
aha1542->ccb_handle + i * sizeof(struct ccb));
aha1542->mb[AHA1542_MAILBOXES + i].status = 0;
- };
+ }
aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */
any2scsi(mb_cmd + 2, aha1542->mb_handle);
if (aha1542_out(sh->io_port, mb_cmd, 5))
@@ -550,7 +549,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
i = inb(STATUS(sh->io_port));
if (i & DF) {
i = inb(DATA(sh->io_port));
- };
+ }
aha1542_outb(sh->io_port, CMD_RETCONF);
aha1542_in(sh->io_port, inquiry_result, 3, 0);
if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
@@ -579,7 +578,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
default:
shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n");
return -1;
- };
+ }
switch (inquiry_result[1]) {
case 0x40:
sh->irq = 15;
@@ -602,7 +601,7 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
default:
shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n");
return -1;
- };
+ }
sh->this_id = inquiry_result[2] & 7;
return 0;
}
@@ -637,7 +636,7 @@ static int aha1542_mbenable(struct Scsi_Host *sh)
if (aha1542_out(sh->io_port, mbenable_cmd, 3))
goto fail;
- };
+ }
while (0) {
fail:
shost_printk(KERN_ERR, sh, "Mailbox init failed\n");
@@ -655,7 +654,7 @@ static int aha1542_query(struct Scsi_Host *sh)
i = inb(STATUS(sh->io_port));
if (i & DF) {
i = inb(DATA(sh->io_port));
- };
+ }
aha1542_outb(sh->io_port, CMD_INQUIRY);
aha1542_in(sh->io_port, inquiry_result, 4, 0);
if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0))
@@ -674,7 +673,7 @@ static int aha1542_query(struct Scsi_Host *sh)
if (inquiry_result[0] == 0x43) {
shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n");
return 1;
- };
+ }
/*
* Always call this - boards that do not support extended bios translation
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 18eb4cfcef9a..134255751819 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -55,8 +55,12 @@
#include <asm/dma.h>
#include <asm/io.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "aha1740.h"
/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 5d566d2b2997..f2f3405cdec5 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -194,7 +194,7 @@ struct ahd_linux_iocell_opts
#define AIC79XX_PRECOMP_INDEX 0
#define AIC79XX_SLEWRATE_INDEX 1
#define AIC79XX_AMPLITUDE_INDEX 2
-static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+static struct ahd_linux_iocell_opts aic79xx_iocell_info[] __ro_after_init =
{
AIC79XX_DEFAULT_IOOPTS,
AIC79XX_DEFAULT_IOOPTS,
@@ -755,11 +755,7 @@ ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
static int
ahd_linux_abort(struct scsi_cmnd *cmd)
{
- int error;
-
- error = ahd_linux_queue_abort_cmd(cmd);
-
- return error;
+ return ahd_linux_queue_abort_cmd(cmd);
}
/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 679a4fd13874..793fe19993a9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -420,8 +420,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
/****************************** PCI-X definitions *****************************/
#define PCIXR_COMMAND 0x96
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index 2f0bdb9225a4..5fad41b1ab58 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -260,8 +260,8 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahd_compose_id(device,
vendor,
subdevice,
@@ -298,7 +298,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
* Record if this is an HP board.
*/
subvendor = ahd_pci_read_config(ahd->dev_softc,
- PCIR_SUBVEND_0, /*bytes*/2);
+ PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
if (subvendor == SUBID_HP)
ahd->flags |= AHD_HP_BOARD;
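
PCIR_SUBVEND_0/PCIR_SUBDEV_0 duplicated config offsets 0x2c/0x2e that <linux/pci_regs.h> already names PCI_SUBSYSTEM_VENDOR_ID and PCI_SUBSYSTEM_ID. With a plain struct pci_dev the same values can be read through the generic accessors — or taken from the cached pdev->subsystem_vendor/subsystem_device fields:

        #include <linux/pci.h>

        static void read_subsystem_ids(struct pci_dev *pdev, u16 *subvendor, u16 *subdevice)
        {
                pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, subvendor);
                pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, subdevice);
        }
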
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 4782a304e93c..51d9f4de0734 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -433,8 +433,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
/* config registers for header type 0 devices */
#define PCIR_MAPS 0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0 0x2e
typedef enum
{
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index dab3a6d12c4d..2d4c85426dc3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -673,8 +673,8 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
- subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
- subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+ subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+ subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
/*
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 5f474e490f3e..cd692a4c5f85 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -283,7 +283,7 @@ main(int argc, char *argv[])
/*
* Descend the tree of scopes and insert/emit
* patches as appropriate. We perform a depth first
- * tranversal, recursively handling each scope.
+ * traversal, recursively handling each scope.
*/
/* start at the root scope */
dump_scope(SLIST_FIRST(&scope_stack));
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 8f24180646c2..f595bc2ee45e 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -60,7 +60,6 @@ void asd_set_dmamode(struct domain_device *dev);
/* ---------- TMFs ---------- */
int asd_abort_task(struct sas_task *);
int asd_abort_task_set(struct domain_device *, u8 *lun);
-int asd_clear_aca(struct domain_device *, u8 *lun);
int asd_clear_task_set(struct domain_device *, u8 *lun);
int asd_lu_reset(struct domain_device *, u8 *lun);
int asd_I_T_nexus_reset(struct domain_device *dev);
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 73506a459bf8..91d196f26b76 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -159,7 +159,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
flags |= OPEN_REQUIRED;
if ((dev->dev_type == SAS_SATA_DEV) ||
(dev->tproto & SAS_PROTOCOL_STP)) {
- struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
+ struct smp_rps_resp *rps_resp = &dev->sata_dev.rps_resp;
if (rps_resp->frame_type == SMP_RESPONSE &&
rps_resp->function == SMP_REPORT_PHY_SATA &&
rps_resp->result == SMP_RESP_FUNC_ACC) {
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 7a78606598c4..954d0c5ae2e2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -960,7 +960,6 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_abort_task = asd_abort_task,
.lldd_abort_task_set = asd_abort_task_set,
- .lldd_clear_aca = asd_clear_aca,
.lldd_clear_task_set = asd_clear_task_set,
.lldd_I_T_nexus_reset = asd_I_T_nexus_reset,
.lldd_lu_reset = asd_lu_reset,
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index c6b63eae28f5..ed119a3f6f2e 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -322,7 +322,6 @@ Again:
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
struct completion *completion = ascb->completion;
@@ -532,7 +531,6 @@ int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
struct sas_task *t = task;
struct asd_ascb *ascb = NULL, *a;
struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
- unsigned long flags;
res = asd_can_queue(asd_ha, 1);
if (res)
@@ -575,10 +573,6 @@ int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
}
if (res)
goto out_err_unmap;
-
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&t->task_state_lock, flags);
}
list_del_init(&alist);
@@ -597,9 +591,6 @@ out_err_unmap:
if (a == b)
break;
t = a->uldd_task;
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&t->task_state_lock, flags);
switch (t->task_proto) {
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 0eb6e206a2b4..27d32b8c2987 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -287,7 +287,7 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
fh = edb->vaddr + 16;
ru = edb->vaddr + 16 + sizeof(*fh);
res = ru->status;
- if (ru->datapres == 1) /* Response data present */
+ if (ru->datapres == SAS_DATAPRES_RESPONSE_DATA)
res = ru->resp_data[3];
#if 0
ascb->tag = fh->tag;
@@ -644,15 +644,6 @@ int asd_abort_task_set(struct domain_device *dev, u8 *lun)
return res;
}
-int asd_clear_aca(struct domain_device *dev, u8 *lun)
-{
- int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
-
- if (res == TMF_RESP_FUNC_COMPLETE)
- asd_clear_nexus_I_T_L(dev, lun);
- return res;
-}
-
int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 81eb3bbdfc51..7602639da9b3 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -126,13 +126,17 @@
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_spi.h>
#include "acornscsi.h"
#include "msgqueue.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
@@ -725,7 +729,7 @@ intr_ret_t acornscsi_kick(AS_Host *host)
*/
host->scsi.phase = PHASE_CONNECTING;
host->SCpnt = SCpnt;
- host->scsi.SCp = SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(SCpnt);
host->dma.xfer_setup = 0;
host->dma.xfer_required = 0;
host->dma.xfer_done = 0;
@@ -1420,6 +1424,7 @@ unsigned char acornscsi_readmessagebyte(AS_Host *host)
static
void acornscsi_message(AS_Host *host)
{
+ struct scsi_pointer *scsi_pointer;
unsigned char message[16];
unsigned int msgidx = 0, msglen = 1;
@@ -1489,8 +1494,9 @@ void acornscsi_message(AS_Host *host)
* the saved data pointer for the current I/O process.
*/
acornscsi_dma_cleanup(host);
- host->SCpnt->SCp = host->scsi.SCp;
- host->SCpnt->SCp.sent_command = 0;
+ scsi_pointer = arm_scsi_pointer(host->SCpnt);
+ *scsi_pointer = host->scsi.SCp;
+ scsi_pointer->sent_command = 0;
host->scsi.phase = PHASE_MSGIN;
break;
@@ -1505,7 +1511,7 @@ void acornscsi_message(AS_Host *host)
* the present command and status areas.'
*/
acornscsi_dma_cleanup(host);
- host->scsi.SCp = host->SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(host->SCpnt);
host->scsi.phase = PHASE_MSGIN;
break;
@@ -1805,7 +1811,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
/*
* Restore data pointer from SAVED pointers.
*/
- host->scsi.SCp = host->SCpnt->SCp;
+ host->scsi.SCp = *arm_scsi_pointer(host->SCpnt);
#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
printk(", data pointers: [%p, %X]",
host->scsi.SCp.ptr, host->scsi.SCp.this_residual);
@@ -2404,6 +2410,7 @@ acornscsi_intr(int irq, void *dev_id)
*/
static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
void (*done)(struct scsi_cmnd *) = scsi_done;
AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
@@ -2419,9 +2426,9 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt)
SCpnt->host_scribble = NULL;
SCpnt->result = 0;
- SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
- SCpnt->SCp.sent_command = 0;
- SCpnt->SCp.scsi_xferred = 0;
+ scsi_pointer->phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->scsi_xferred = 0;
init_SCp(SCpnt);
@@ -2787,6 +2794,7 @@ static struct scsi_host_template acornscsi_template = {
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "acornscsi",
+ .cmd_size = sizeof(struct arm_cmd_priv),
};
static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/arm_scsi.h
index 4d5ff7b4e864..ea9fcd92c6de 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/arm_scsi.h
@@ -1,16 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * linux/drivers/acorn/scsi/scsi.h
- *
* Copyright (C) 2002 Russell King
*
- * Commonly used scsi driver functions.
+ * Commonly used functions by the ARM SCSI-II drivers.
*/
#include <linux/scatterlist.h>
#define BELT_AND_BRACES
+struct arm_cmd_priv {
+ struct scsi_pointer scsi_pointer;
+};
+
+static inline struct scsi_pointer *arm_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ struct arm_cmd_priv *acmd = scsi_cmd_priv(cmd);
+
+ return &acmd->scsi_pointer;
+}
+
/*
* The scatter-gather list handling. This contains all
* the yucky stuff that needs to be fixed properly.
@@ -78,16 +87,18 @@ static inline void put_next_SCp_byte(struct scsi_pointer *SCp, unsigned char c)
static inline void init_SCp(struct scsi_cmnd *SCpnt)
{
- memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
+ memset(scsi_pointer, 0, sizeof(struct scsi_pointer));
if (scsi_bufflen(SCpnt)) {
unsigned long len = 0;
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
- SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.phase = scsi_bufflen(SCpnt);
+ scsi_pointer->buffer = scsi_sglist(SCpnt);
+ scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->phase = scsi_bufflen(SCpnt);
#ifdef BELT_AND_BRACES
{ /*
@@ -111,15 +122,15 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
* FIXME: Totally naive fixup. We should abort
* with error
*/
- SCpnt->SCp.phase =
+ scsi_pointer->phase =
min_t(unsigned long, len,
scsi_bufflen(SCpnt));
}
}
#endif
} else {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
- SCpnt->SCp.phase = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->phase = 0;
}
}
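
fas216.h (further down in this patch) insists that scsi_pointer stay the first member of struct fas216_cmd_priv because arm_scsi_pointer() reads every driver's private area as a struct arm_cmd_priv. A sketch of that layout invariant, using renamed mirror structs to mark them as illustration:

        #include <linux/build_bug.h>
        #include <linux/stddef.h>
        #include <scsi/scsi_cmnd.h>

        struct demo_arm_priv {
                struct scsi_pointer scsi_pointer;
        };

        struct demo_fas216_priv {
                struct scsi_pointer scsi_pointer;       /* must remain the first member */
                void (*scsi_done)(struct scsi_cmnd *cmd);
        };

        /* arm_scsi_pointer() casts the private area to the arm layout, so the
         * fas216 layout must keep scsi_pointer at offset 0 as well: */
        static_assert(offsetof(struct demo_fas216_priv, scsi_pointer) == 0);
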
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 7f667c198f6d..2527b542bcdd 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -35,8 +35,12 @@
#include <asm/io.h>
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
struct arxescsi_info {
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 3fd944374631..5d4f67ba74c0 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -223,7 +223,7 @@ static struct scsi_host_template cumanascsi_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.proc_name = "CumanaSCSI-1",
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
.dma_boundary = PAGE_SIZE - 1,
};
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 3c00d7773876..d15053f02472 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -29,10 +29,14 @@
#include <asm/ecard.h>
#include <asm/io.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 1394590eecea..6f374af9f45f 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -35,10 +35,14 @@
#include <asm/dma.h>
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 7019b91f0ce6..4ce0b2d73614 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -47,11 +47,15 @@
#include <asm/irq.h>
#include <asm/ecard.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
/* NOTE: SCSI2 Synchronous transfers *require* DMA according to
* the data sheet. This restriction is crazy, especially when
@@ -757,7 +761,7 @@ static void fas216_transfer(FAS216_Info *info)
fas216_log(info, LOG_ERROR, "null buffer passed to "
"fas216_starttransfer");
print_SCp(&info->scsi.SCp, "SCp: ", "\n");
- print_SCp(&info->SCpnt->SCp, "Cmnd SCp: ", "\n");
+ print_SCp(arm_scsi_pointer(info->SCpnt), "Cmnd SCp: ", "\n");
return;
}
@@ -1007,7 +1011,7 @@ fas216_reselected_intr(FAS216_Info *info)
/*
* Restore data pointer from SAVED data pointer
*/
- info->scsi.SCp = info->SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(info->SCpnt);
fas216_log(info, LOG_CONNECT, "data pointers: [%p, %X]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1050,6 +1054,7 @@ fas216_reselected_intr(FAS216_Info *info)
static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int msglen)
{
+ struct scsi_pointer *scsi_pointer;
int i;
switch (message[0]) {
@@ -1074,8 +1079,9 @@ static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int
* as required by the SCSI II standard. These always
* point to the start of their respective areas.
*/
- info->SCpnt->SCp = info->scsi.SCp;
- info->SCpnt->SCp.sent_command = 0;
+ scsi_pointer = arm_scsi_pointer(info->SCpnt);
+ *scsi_pointer = info->scsi.SCp;
+ scsi_pointer->sent_command = 0;
fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
"save data pointers: [%p, %X]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1088,7 +1094,7 @@ static void fas216_parse_message(FAS216_Info *info, unsigned char *message, int
/*
* Restore current data pointer from SAVED data pointer
*/
- info->scsi.SCp = info->SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(info->SCpnt);
fas216_log(info, LOG_CONNECT | LOG_MESSAGES | LOG_BUFFER,
"restore data pointers: [%p, 0x%x]",
info->scsi.SCp.ptr, info->scsi.SCp.this_residual);
@@ -1766,7 +1772,7 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
* claim host busy
*/
info->scsi.phase = PHASE_SELECTION;
- info->scsi.SCp = SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(SCpnt);
info->SCpnt = SCpnt;
info->dma.transfer_type = fasdma_none;
@@ -1845,7 +1851,7 @@ static void fas216_do_bus_device_reset(FAS216_Info *info,
* claim host busy
*/
info->scsi.phase = PHASE_SELECTION;
- info->scsi.SCp = SCpnt->SCp;
+ info->scsi.SCp = *arm_scsi_pointer(SCpnt);
info->SCpnt = SCpnt;
info->dma.transfer_type = fasdma_none;
@@ -1995,11 +2001,13 @@ static void fas216_devicereset_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
unsigned int result)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
"request sense complete, result=0x%04x%02x%02x",
- result, SCpnt->SCp.Message, SCpnt->SCp.Status);
+ result, scsi_pointer->Message, scsi_pointer->Status);
- if (result != DID_OK || SCpnt->SCp.Status != SAM_STAT_GOOD)
+ if (result != DID_OK || scsi_pointer->Status != SAM_STAT_GOOD)
/*
* Something went wrong. Make sure that we don't
* have valid data in the sense buffer that could
@@ -2029,6 +2037,8 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
static void
fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
{
+ struct scsi_pointer *scsi_pointer = arm_scsi_pointer(SCpnt);
+
info->stats.fins += 1;
set_host_byte(SCpnt, result);
@@ -2103,8 +2113,8 @@ request_sense:
fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
"requesting sense");
init_SCp(SCpnt);
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.Status = 0;
+ scsi_pointer->Message = 0;
+ scsi_pointer->Status = 0;
SCpnt->host_scribble = (void *)fas216_rq_sns_done;
/*
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index abf960487314..08113277a2a9 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -312,6 +312,10 @@ typedef struct {
/* driver-private data per SCSI command. */
struct fas216_cmd_priv {
+ /*
+ * @scsi_pointer must be the first member. See also arm_scsi_pointer().
+ */
+ struct scsi_pointer scsi_pointer;
void (*scsi_done)(struct scsi_cmnd *cmd);
};
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 78f33d57c3e8..f18a0620c808 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -113,7 +113,7 @@ static struct scsi_host_template oakscsi_template = {
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
.proc_name = "oakscsi",
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 8fec435cee18..7586d2a03812 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -20,10 +20,14 @@
#include <asm/ecard.h>
#include <asm/io.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "fas216.h"
-#include "scsi.h"
+#include "arm_scsi.h"
#include <scsi/scsicam.h>
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index c6f71a7d1b8e..978df23ce188 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -20,7 +20,11 @@
#include <linux/list.h>
#include <linux/init.h>
-#include "../scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#define DEBUG
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 95d7a3586083..d401cf27113a 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -538,7 +538,7 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int wanted_len = cmd->SCp.this_residual;
+ int wanted_len = NCR5380_to_ncmd(cmd)->this_residual;
int possible_len, limit;
if (wanted_len < DMA_MIN_SIZE)
@@ -610,7 +610,7 @@ static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
}
/* Last step: apply the hard limit on DMA transfers */
- limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ?
+ limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(NCR5380_to_ncmd(cmd)->ptr))) ?
STRAM_BUFFER_SIZE : 255*512;
if (possible_len > limit)
possible_len = limit;
@@ -711,7 +711,7 @@ static struct scsi_host_template atari_scsi_template = {
.this_id = 7,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
static int __init atari_scsi_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index dcd6fae65a88..7143418d690f 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -614,7 +614,6 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
/**
* atp870u_queuecommand_lck - Queue SCSI command
* @req_p: request block
- * @done: completion function
*
* Queue a command to the ATP queue. Called with the host lock held.
*/
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index ab55681145f8..50a577ac3bb4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -218,7 +218,7 @@ static char const *cqe_desc[] = {
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
- struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
+ struct iscsi_task *abrt_task = iscsi_cmd(sc)->task;
struct iscsi_cls_session *cls_session;
struct beiscsi_io_task *abrt_io_task;
struct beiscsi_conn *beiscsi_conn;
@@ -231,6 +231,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
+completion_check:
/* check if we raced, task just got cleaned up under us */
spin_lock_bh(&session->back_lock);
if (!abrt_task || !abrt_task->sc) {
@@ -238,7 +239,13 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(abrt_task);
+ if (!iscsi_get_task(abrt_task)) {
+ spin_unlock(&session->back_lock);
+ /* We are just about to call iscsi_free_task so wait for it. */
+ udelay(5);
+ goto completion_check;
+ }
+
abrt_io_task = abrt_task->dd_data;
conn = abrt_task->conn;
beiscsi_conn = conn->dd_data;
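
The abort path can no longer take a task reference unconditionally: iscsi_get_task() now fails once the refcount has dropped to zero, and the caller re-checks under the lock. The same idea in generic kref terms (demo_task and the re-lookup step are hypothetical):

        #include <linux/kref.h>

        struct demo_task {
                struct kref ref;
        };

        static struct demo_task *demo_task_get(struct demo_task *t)
        {
                /* kref_get_unless_zero() refuses to resurrect a dying object */
                if (!t || !kref_get_unless_zero(&t->ref))
                        return NULL;    /* caller should re-lookup and retry */
                return t;
        }
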
@@ -323,7 +330,15 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
}
/* get a task ref till FW processes the req for the ICD used */
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * The task has completed in the driver and is
+ * completing in libiscsi. Just ignore it here. When we
+ * call iscsi_eh_device_reset, it will wait for us.
+ */
+ continue;
+ }
+
io_task = task->dd_data;
/* mark WRB invalid which have been not processed by FW yet */
if (is_chip_be2_be3r(phba)) {
@@ -403,6 +418,7 @@ static struct scsi_host_template beiscsi_sht = {
.cmd_per_lun = BEISCSI_CMD_PER_LUN,
.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct scsi_transport_template *beiscsi_scsi_transport;
@@ -5744,7 +5760,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
cancel_work_sync(&phba->sess_work);
beiscsi_iface_destroy_default(phba);
- iscsi_host_remove(phba->shost);
+ iscsi_host_remove(phba->shost, false);
beiscsi_disable_port(phba, 1);
/* after cancelling boot_work */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 440ef32be048..e5aa982ffedc 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -732,9 +732,6 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-
if (rc) {
rc = -ENODEV;
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
@@ -1560,9 +1557,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
- DMA_BIT_MASK(32));
- if (rc)
goto out_disable_device;
if (restart_bfa(bfad) == -1)
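
The dropped retry was dead code: per the DMA API documentation, a 64-bit mask is only rejected when a 32-bit mask would be rejected too, so one call suffices. What remains is simply:

        #include <linux/dma-mapping.h>

        static int demo_init_dma(struct device *dev)
        {
                /* one call is enough: no 32-bit fallback needed on failure */
                return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        }
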
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index c8b947c16069..5a85401e9e2d 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -711,7 +711,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
- return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
+ return sysfs_emit(buf, "%s\n", serial_num);
}
static ssize_t
@@ -725,7 +725,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
char model[BFA_ADAPTER_MODEL_NAME_LEN];
bfa_get_adapter_model(&bfad->bfa, model);
- return snprintf(buf, PAGE_SIZE, "%s\n", model);
+ return sysfs_emit(buf, "%s\n", model);
}
static ssize_t
@@ -805,7 +805,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
- return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
+ return sysfs_emit(buf, "%s\n", model_descr);
}
static ssize_t
@@ -819,7 +819,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
u64 nwwn;
nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
- return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
+ return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn));
}
static ssize_t
@@ -836,7 +836,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
- return snprintf(buf, PAGE_SIZE, "%s\n", symname);
+ return sysfs_emit(buf, "%s\n", symname);
}
static ssize_t
@@ -850,14 +850,14 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
char hw_ver[BFA_VERSION_LEN];
bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
+ return sysfs_emit(buf, "%s\n", hw_ver);
}
static ssize_t
bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION);
+ return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION);
}
static ssize_t
@@ -871,7 +871,7 @@ bfad_im_optionrom_version_show(struct device *dev,
char optrom_ver[BFA_VERSION_LEN];
bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
+ return sysfs_emit(buf, "%s\n", optrom_ver);
}
static ssize_t
@@ -885,7 +885,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
char fw_ver[BFA_VERSION_LEN];
bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
- return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
+ return sysfs_emit(buf, "%s\n", fw_ver);
}
static ssize_t
@@ -897,7 +897,7 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
bfa_get_nports(&bfad->bfa));
}
@@ -905,7 +905,7 @@ static ssize_t
bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME);
+ return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME);
}
static ssize_t
@@ -924,14 +924,14 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s),
GFP_ATOMIC);
if (rports == NULL)
- return snprintf(buf, PAGE_SIZE, "Failed\n");
+ return sysfs_emit(buf, "Failed\n");
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
kfree(rports);
- return snprintf(buf, PAGE_SIZE, "%d\n", nrports);
+ return sysfs_emit(buf, "%d\n", nrports);
}
static DEVICE_ATTR(serial_number, S_IRUGO,
@@ -981,7 +981,7 @@ const struct attribute_group *bfad_im_host_groups[] = {
NULL
};
-struct attribute *bfad_im_vport_attrs[] = {
+static struct attribute *bfad_im_vport_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_model.attr,
&dev_attr_model_description.attr,
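sysfs_emit() is the preferred replacement for snprintf() in sysfs show()
callbacks: it hard-codes the PAGE_SIZE bound and warns when the buffer is not
the page-aligned buffer sysfs handed in, catching misuse that
snprintf(buf, PAGE_SIZE, ...) lets through silently. A minimal sketch, using a
hypothetical example_show():

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        /* sysfs_emit() knows buf is a full page; no size argument. */
        return sysfs_emit(buf, "%s\n", "example");
}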
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index fd1b378a263a..52db147d9979 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -371,8 +371,7 @@ bfad_debugfs_release_fwtrc(struct inode *inode, struct file *file)
if (!fw_debug)
return 0;
- if (fw_debug->debug_buffer)
- vfree(fw_debug->debug_buffer);
+ vfree(fw_debug->debug_buffer);
file->private_data = NULL;
kfree(fw_debug);
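The guard removed above was redundant: like kfree(), vfree() is a no-op when
passed NULL, so callers can free unconditionally:

        vfree(fw_debug->debug_buffer);  /* safe even if debug_buffer is NULL */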
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 759d2bb1ecdd..c335f7a188d2 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -150,10 +150,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
wait_queue_head_t *wq;
- cmnd->SCp.Status |= tsk_status << 1;
- set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
- wq = (wait_queue_head_t *) cmnd->SCp.ptr;
- cmnd->SCp.ptr = NULL;
+ bfad_priv(cmnd)->status |= tsk_status << 1;
+ set_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status);
+ wq = bfad_priv(cmnd)->wq;
+ bfad_priv(cmnd)->wq = NULL;
if (wq)
wake_up(wq);
@@ -259,7 +259,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
* happens.
*/
cmnd->host_scribble = NULL;
- cmnd->SCp.Status = 0;
+ bfad_priv(cmnd)->status = 0;
bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
/*
* bfa_itnim can be NULL if the port gets disconnected and the bfa
@@ -326,8 +326,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
* if happens.
*/
cmnd->host_scribble = NULL;
- cmnd->SCp.ptr = (char *)&wq;
- cmnd->SCp.Status = 0;
+ bfad_priv(cmnd)->wq = &wq;
+ bfad_priv(cmnd)->status = 0;
bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
/*
* bfa_itnim can be NULL if the port gets disconnected and the bfa
@@ -347,10 +347,9 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- wait_event(wq, test_bit(IO_DONE_BIT,
- (unsigned long *)&cmnd->SCp.Status));
+ wait_event(wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status));
- task_status = cmnd->SCp.Status >> 1;
+ task_status = bfad_priv(cmnd)->status >> 1;
if (task_status != BFI_TSKIM_STS_OK) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"LUN reset failure, status: %d\n", task_status);
@@ -381,16 +380,16 @@ bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
spin_lock_irqsave(&bfad->bfad_lock, flags);
itnim = bfad_get_itnim(im_port, starget->id);
if (itnim) {
- cmnd->SCp.ptr = (char *)&wq;
+ bfad_priv(cmnd)->wq = &wq;
rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
if (rc == BFA_STATUS_OK) {
/* wait target reset to complete */
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
wait_event(wq, test_bit(IO_DONE_BIT,
- (unsigned long *)&cmnd->SCp.Status));
+ &bfad_priv(cmnd)->status));
spin_lock_irqsave(&bfad->bfad_lock, flags);
- task_status = cmnd->SCp.Status >> 1;
+ task_status = bfad_priv(cmnd)->status >> 1;
if (task_status != BFI_TSKIM_STS_OK)
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"target reset failure,"
@@ -756,7 +755,6 @@ void
bfad_destroy_workq(struct bfad_im_s *im)
{
if (im && im->drv_workq) {
- flush_workqueue(im->drv_workq);
destroy_workqueue(im->drv_workq);
im->drv_workq = NULL;
}
@@ -797,6 +795,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.name = BFAD_DRIVER_NAME,
.info = bfad_im_info,
.queuecommand = bfad_im_queuecommand,
+ .cmd_size = sizeof(struct bfad_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bfad_im_abort_handler,
.eh_device_reset_handler = bfad_im_reset_lun_handler,
@@ -819,6 +818,7 @@ struct scsi_host_template bfad_im_vport_template = {
.name = BFAD_DRIVER_NAME,
.info = bfad_im_info,
.queuecommand = bfad_im_queuecommand,
+ .cmd_size = sizeof(struct bfad_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = bfad_im_abort_handler,
.eh_device_reset_handler = bfad_im_reset_lun_handler,
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 829345b514d1..c03b225ea1ba 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -43,6 +43,22 @@ u32 bfad_im_supported_speeds(struct bfa_s *bfa);
*/
#define IO_DONE_BIT 0
+/**
+ * struct bfad_cmd_priv - private data per SCSI command.
+ * @status: Lowest bit represents IO_DONE. The next seven bits hold a value of
+ * type enum bfi_tskim_status.
+ * @wq: Wait queue used to wait for completion of an operation.
+ */
+struct bfad_cmd_priv {
+ unsigned long status;
+ wait_queue_head_t *wq;
+};
+
+static inline struct bfad_cmd_priv *bfad_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
struct bfad_itnim_data_s {
struct bfad_itnim_s *itnim;
};
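The struct added above is the standard replacement for the removed
scsi_pointer (SCp) scratch fields: setting .cmd_size in the host template
makes the SCSI midlayer allocate that many private bytes behind every
scsi_cmnd, and scsi_cmd_priv() returns a pointer to them. A sketch of the
pattern, with hypothetical mydrv names:

struct mydrv_cmd_priv {
        unsigned long status;
        wait_queue_head_t *wq;
};

static struct scsi_host_template mydrv_template = {
        /* Midlayer allocates this much extra per command. */
        .cmd_size = sizeof(struct mydrv_cmd_priv),
        /* ... */
};

static void mydrv_done(struct scsi_cmnd *cmd)
{
        struct mydrv_cmd_priv *priv = scsi_cmd_priv(cmd);

        priv->status = 0;       /* per-command state, no SCp needed */
}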
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index b4cea8b06ea1..046247420cfa 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -137,8 +137,6 @@
#define BNX2FC_FW_TIMEOUT (3 * HZ)
#define PORT_MAX 2
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
-
/* FC FCP Status */
#define FC_GOOD 0
@@ -493,7 +491,14 @@ struct bnx2fc_unsol_els {
struct work_struct unsol_els_work;
};
+struct bnx2fc_priv {
+ struct bnx2fc_cmd *io_req;
+};
+static inline struct bnx2fc_priv *bnx2fc_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 71fa62bd3083..05ddbb9bb7d8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -82,7 +82,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv);
-static void bnx2fc_destroy_work(struct work_struct *work);
+static void bnx2fc_port_destroy(struct fcoe_port *port);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -273,7 +273,6 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct fcoe_port *port;
struct fcoe_hdr *hp;
struct bnx2fc_rport *tgt;
- struct fc_stats *stats;
u8 sof, eof;
u32 crc;
unsigned int hlen, tlen, elen;
@@ -399,10 +398,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
/*update tx stats */
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->TxFrames++;
- stats->TxWords += wlen;
- put_cpu();
+ this_cpu_inc(lport->stats->TxFrames);
+ this_cpu_add(lport->stats->TxWords, wlen);
/* send down to lld */
fr_dev(fp) = lport;
@@ -508,10 +505,10 @@ static int bnx2fc_l2_rcv_thread(void *arg)
static void bnx2fc_recv_frame(struct sk_buff *skb)
{
- u32 fr_len;
+ u64 crc_err;
+ u32 fr_len, fr_crc;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
- struct fc_stats *stats;
struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
@@ -542,6 +539,9 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+ this_cpu_inc(lport->stats->RxFrames);
+ this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
+
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_dev(fp) = lport;
@@ -624,16 +624,13 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
- stats = per_cpu_ptr(lport->stats, smp_processor_id());
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ fr_crc = le32_to_cpu(fr_crc(fp));
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
+ if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
+ crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount);
+ if (crc_err < 5)
printk(KERN_WARNING PFX "dropping frame with "
"CRC error\n");
- stats->InvalidCRCCount++;
kfree_skb(skb);
return;
}
@@ -907,9 +904,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
__bnx2fc_destroy(interface);
}
mutex_unlock(&bnx2fc_dev_lock);
-
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
return;
default:
@@ -962,9 +956,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
mutex_unlock(&lport->lp_mutex);
fc_host_port_type(lport->host) =
FC_PORTTYPE_UNKNOWN;
- per_cpu_ptr(lport->stats,
- get_cpu())->LinkFailureCount++;
- put_cpu();
+ this_cpu_inc(lport->stats->LinkFailureCount);
fcoe_clean_pending_queue(lport);
wait_for_upload = 1;
}
@@ -1215,8 +1207,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
mutex_unlock(&n_port->lp_mutex);
bnx2fc_free_vport(interface->hba, port->lport);
bnx2fc_port_shutdown(port->lport);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
return 0;
}
@@ -1525,7 +1517,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port->lport = lport;
port->priv = interface;
port->get_netdev = bnx2fc_netdev;
- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
rc = bnx2fc_lport_config(lport);
@@ -1653,8 +1644,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
bnx2fc_interface_cleanup(interface);
bnx2fc_stop(interface);
list_del(&interface->list);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
}
/**
@@ -1694,15 +1685,12 @@ netdev_err:
return rc;
}
-static void bnx2fc_destroy_work(struct work_struct *work)
+static void bnx2fc_port_destroy(struct fcoe_port *port)
{
- struct fcoe_port *port;
struct fc_lport *lport;
- port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
-
- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
bnx2fc_if_destroy(lport);
}
@@ -2556,9 +2544,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
__bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
-
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
@@ -2723,14 +2708,13 @@ static int __init bnx2fc_mod_init(void)
bg = &bnx2fc_global;
skb_queue_head_init(&bg->fcoe_rx_list);
- l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
- (void *)bg,
- "bnx2fc_l2_thread");
+ l2_thread = kthread_run(bnx2fc_l2_rcv_thread,
+ (void *)bg,
+ "bnx2fc_l2_thread");
if (IS_ERR(l2_thread)) {
rc = PTR_ERR(l2_thread);
goto free_wq;
}
- wake_up_process(l2_thread);
spin_lock_bh(&bg->fcoe_rx_list.lock);
bg->kthread = l2_thread;
spin_unlock_bh(&bg->fcoe_rx_list.lock);
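kthread_run() used above is kthread_create() plus wake_up_process() in a
single call; using it removes the window in which the new thread exists but is
never woken, and keeps the error handling in one place. A sketch:

        struct task_struct *tsk;

        tsk = kthread_run(bnx2fc_l2_rcv_thread, bg, "bnx2fc_l2_thread");
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);    /* never created, nothing to stop */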
@@ -2980,6 +2964,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.track_queue_depth = 1,
.slave_configure = bnx2fc_slave_configure,
.shost_groups = bnx2fc_host_groups,
+ .cmd_size = sizeof(struct bnx2fc_priv),
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
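The statistics conversions in this file all follow one rule: this_cpu_inc()
and this_cpu_add() perform a preemption-safe update of the local CPU's
counter, replacing the per_cpu_ptr(lport->stats, get_cpu()) ... put_cpu()
bracketing. A sketch with a hypothetical helper:

static void fcoe_count_tx(struct fc_lport *lport, u32 wlen)
{
        /* Each op disables preemption internally, just for the update. */
        this_cpu_inc(lport->stats->TxFrames);
        this_cpu_add(lport->stats->TxWords, wlen);
}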
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 0103f811cc25..776544385598 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1169,7 +1169,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
ofld_kcqe->fcoe_conn_context_id);
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
goto ofld_cmpl_err;
}
/*
@@ -1226,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
* and enable
*/
if (tgt->context_id != context_id) {
- printk(KERN_ERR PFX "context id mis-match\n");
+ printk(KERN_ERR PFX "context id mismatch\n");
return;
}
interface = tgt->port->priv;
if (hba != interface->hba) {
- printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
goto enbl_cmpl_err;
}
if (!ofld_kcqe->completion_status)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index b9114113ee73..b42a9accb832 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -204,7 +204,7 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
sc_cmd->allowed);
scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
- sc_cmd->SCp.ptr = NULL;
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
}
@@ -472,7 +472,7 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
u32 free_sqes;
u32 max_sqes;
u16 xid;
- int index = get_cpu();
+ int index = raw_smp_processor_id();
max_sqes = BNX2FC_SCSI_MAX_SQES;
/*
@@ -485,7 +485,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
(tgt->num_active_ios.counter >= max_sqes) ||
(free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- put_cpu();
return NULL;
}
@@ -498,7 +497,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
atomic_inc(&tgt->num_active_ios);
atomic_dec(&tgt->free_sqes);
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
- put_cpu();
INIT_LIST_HEAD(&io_req->link);
@@ -765,7 +763,7 @@ retry_tmf:
task = &(task_page[index]);
bnx2fc_init_mp_task(io_req, task);
- sc_cmd->SCp.ptr = (char *)io_req;
+ bnx2fc_priv(sc_cmd)->io_req = io_req;
/* Obtain free SQ entry */
spin_lock_bh(&tgt->tgt_lock);
@@ -1147,7 +1145,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
spin_lock_bh(&tgt->tgt_lock);
- io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+ io_req = bnx2fc_priv(sc_cmd)->io_req;
if (!io_req) {
/* Command might have just completed */
printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
@@ -1572,8 +1570,8 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
fc_hdr->fh_r_ctl);
}
- if (!sc_cmd->SCp.ptr) {
- printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
+ if (!bnx2fc_priv(sc_cmd)->io_req) {
+ printk(KERN_ERR PFX "tm_compl: io_req is NULL\n");
return;
}
switch (io_req->fcp_status) {
@@ -1609,7 +1607,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
return;
}
- sc_cmd->SCp.ptr = NULL;
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
@@ -1773,8 +1771,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
io_req->fcp_resid = fcp_rsp->fcp_resid;
io_req->scsi_comp_flags = rsp_flags;
- CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
- fcp_rsp->scsi_status_code;
+ io_req->cdb_status = fcp_rsp->scsi_status_code;
/* Fetch fcp_rsp_info and fcp_sns_info if available */
if (num_rq) {
@@ -1946,8 +1943,8 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* parse fcp_rsp and obtain sense data from RQ if available */
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
- if (!sc_cmd->SCp.ptr) {
- printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+ if (!bnx2fc_priv(sc_cmd)->io_req) {
+ printk(KERN_ERR PFX "io_req is NULL\n");
return;
}
@@ -2018,7 +2015,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
io_req->fcp_status);
break;
}
- sc_cmd->SCp.ptr = NULL;
+ bnx2fc_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
@@ -2033,7 +2030,6 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct fc_lport *lport = port->lport;
- struct fc_stats *stats;
int task_idx, index;
u16 xid;
@@ -2044,22 +2040,20 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
io_req->port = port;
io_req->tgt = tgt;
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
- sc_cmd->SCp.ptr = (char *)io_req;
+ bnx2fc_priv(sc_cmd)->io_req = io_req;
- stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
io_req->io_req_flags = BNX2FC_READ;
- stats->InputRequests++;
- stats->InputBytes += io_req->data_xfer_len;
+ this_cpu_inc(lport->stats->InputRequests);
+ this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len);
} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
io_req->io_req_flags = BNX2FC_WRITE;
- stats->OutputRequests++;
- stats->OutputBytes += io_req->data_xfer_len;
+ this_cpu_inc(lport->stats->OutputRequests);
+ this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len);
} else {
io_req->io_req_flags = 0;
- stats->ControlRequests++;
+ this_cpu_inc(lport->stats->ControlRequests);
}
- put_cpu();
xid = io_req->xid;
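raw_smp_processor_id() in bnx2fc_cmd_alloc() above is deliberate: the CPU
number only selects which per-CPU free list to use, and the per-list spinlock
provides the actual protection, so pinning the task with get_cpu()/put_cpu()
bought nothing. A sketch of the idiom, assuming the cmd_mgr free-list fields
shown in the hunk:

        int index = raw_smp_processor_id();     /* may be stale; that is fine */

        spin_lock_bh(&cmd_mgr->free_list_lock[index]);
        io_req = list_first_entry_or_null(&cmd_mgr->free_list[index],
                                          struct bnx2fc_cmd, link);
        if (io_req)
                list_del_init(&io_req->link);
        spin_unlock_bh(&cmd_mgr->free_list_lock[index]);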
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 9200b718085c..2c246e80c1c4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -482,7 +482,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
}
/*
- * Offlaod process is protected with hba mutex.
+ * Offload process is protected with hba mutex.
* Use the same mutex_lock for upload process too
*/
mutex_lock(&hba->hba_mutex);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5521469ce678..6c864b093ac9 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
break;
- if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
@@ -2398,7 +2398,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+ printk(KERN_ALERT "conn destroy- error hba mismatch\n");
return;
}
@@ -2432,7 +2432,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
}
if (hba != ep->hba) {
- printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+ printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n");
return;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index e21b053b4f3e..a3c800e04a2e 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -909,7 +909,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
{
struct Scsi_Host *shost = hba->shost;
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
INIT_LIST_HEAD(&hba->ep_ofld_list);
INIT_LIST_HEAD(&hba->ep_active_list);
INIT_LIST_HEAD(&hba->ep_destroy_list);
@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
/* Must suspend all rx queue activity for this ep */
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
}
/* CONN_DISCONNECT timeout may or may not be an issue depending
* on what transcribed in TCP layer, different targets behave
@@ -2268,6 +2268,7 @@ static struct scsi_host_template bnx2i_host_template = {
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
.shost_groups = bnx2i_dev_groups,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
struct iscsi_transport bnx2i_iscsi_transport = {
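The suspend_rx conversion here, and in libcxgbi below, replaces a dedicated
per-connection word with one bit in the shared conn->flags word, so all
connection state bits can be tested and logged together. A sketch of the bit
API in use:

        set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);      /* stop rx */
        if (test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))
                return;                                 /* drop rx work */
        clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);    /* resume */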
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 27012908b586..7ab29eaec6f3 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -63,7 +63,7 @@ static int verbose = 1;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose,"be verbose (default: on)");
-static int debug = 0;
+static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable/disable debug messages, also prints more "
"detailed sense codes on scsi errors (default: off)");
@@ -239,7 +239,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
u_char *buffer;
int result;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if(!buffer)
return -ENOMEM;
@@ -297,7 +297,7 @@ ch_readconfig(scsi_changer *ch)
int result,id,lun,i;
u_int elem;
- buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -783,7 +783,7 @@ static long ch_ioctl(struct file *file,
return -EINVAL;
elem = ch->firsts[cge.cge_type] + cge.cge_unit;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
mutex_lock(&ch->lock);
@@ -877,7 +877,7 @@ static long ch_ioctl(struct file *file,
}
default:
- return scsi_ioctl(ch->device, NULL, file->f_mode, cmd, argp);
+ return scsi_ioctl(ch->device, file->f_mode, cmd, argp);
}
}
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 55db02521221..05e1a63e00c3 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -166,7 +166,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
/* Check for Task Management */
- if (likely(scmnd->SCp.Message == 0)) {
+ if (likely(csio_priv(scmnd)->fc_tm_flags == 0)) {
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
fcp_cmnd->fc_tm_flags = 0;
fcp_cmnd->fc_cmdref = 0;
@@ -185,7 +185,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
} else {
memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
- fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
+ fcp_cmnd->fc_tm_flags = csio_priv(scmnd)->fc_tm_flags;
}
}
@@ -1366,9 +1366,9 @@ csio_show_hw_state(struct device *dev,
struct csio_hw *hw = csio_lnode_to_hw(ln);
if (csio_is_hw_ready(hw))
- return snprintf(buf, PAGE_SIZE, "ready\n");
- else
- return snprintf(buf, PAGE_SIZE, "not ready\n");
+ return sysfs_emit(buf, "ready\n");
+
+ return sysfs_emit(buf, "not ready\n");
}
/* Device reset */
@@ -1430,7 +1430,7 @@ csio_show_dbg_level(struct device *dev,
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
+ return sysfs_emit(buf, "%x\n", ln->params.log_level);
}
/* Store debug level */
@@ -1476,7 +1476,7 @@ csio_show_num_reg_rnodes(struct device *dev,
{
struct csio_lnode *ln = shost_priv(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
+ return sysfs_emit(buf, "%d\n", ln->num_reg_rnodes);
}
static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
@@ -1855,7 +1855,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
/* Needed during abort */
cmnd->host_scribble = (unsigned char *)ioreq;
- cmnd->SCp.Message = 0;
+ csio_priv(cmnd)->fc_tm_flags = 0;
/* Kick off SCSI IO SM on the ioreq */
spin_lock_irqsave(&hw->lock, flags);
@@ -2026,7 +2026,7 @@ csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
req, req->wr_status);
/* Cache FW return status */
- cmnd->SCp.Status = req->wr_status;
+ csio_priv(cmnd)->wr_status = req->wr_status;
/* Special handling based on FCP response */
@@ -2049,7 +2049,7 @@ csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
/* Modify return status if flags indicate success */
if (flags & FCP_RSP_LEN_VAL)
if (rsp_info->rsp_code == FCP_TMF_CMPL)
- cmnd->SCp.Status = FW_SUCCESS;
+ csio_priv(cmnd)->wr_status = FW_SUCCESS;
csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
}
@@ -2125,9 +2125,9 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
csio_scsi_cmnd(ioreq) = cmnd;
cmnd->host_scribble = (unsigned char *)ioreq;
- cmnd->SCp.Status = 0;
+ csio_priv(cmnd)->wr_status = 0;
- cmnd->SCp.Message = FCP_TMF_LUN_RESET;
+ csio_priv(cmnd)->fc_tm_flags = FCP_TMF_LUN_RESET;
ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;
/*
@@ -2178,9 +2178,10 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
}
/* LUN reset returned, check cached status */
- if (cmnd->SCp.Status != FW_SUCCESS) {
+ if (csio_priv(cmnd)->wr_status != FW_SUCCESS) {
csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
- cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
+ cmnd->device->id, cmnd->device->lun,
+ csio_priv(cmnd)->wr_status);
goto fail;
}
@@ -2271,6 +2272,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
.name = CSIO_DRV_DESC,
.proc_name = KBUILD_MODNAME,
.queuecommand = csio_queuecommand,
+ .cmd_size = sizeof(struct csio_cmd_priv),
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = csio_eh_abort_handler,
.eh_device_reset_handler = csio_eh_lun_reset_handler,
diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h
index 2257c3dcf724..39dda3c88f0d 100644
--- a/drivers/scsi/csiostor/csio_scsi.h
+++ b/drivers/scsi/csiostor/csio_scsi.h
@@ -188,6 +188,16 @@ struct csio_scsi_level_data {
uint64_t oslun;
};
+struct csio_cmd_priv {
+ uint8_t fc_tm_flags; /* task management flags */
+ uint16_t wr_status;
+};
+
+static inline struct csio_cmd_priv *csio_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline struct csio_ioreq *
csio_get_scsi_ioreq(struct csio_scsim *scm)
{
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index f949a4e00783..ff9d4287937a 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -98,6 +98,7 @@ static struct scsi_host_template cxgb3i_host_template = {
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport cxgb3i_iscsi_transport = {
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index efb3e2b3398e..c07d2e3b4bcf 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -116,6 +116,7 @@ static struct scsi_host_template cxgb4i_host_template = {
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport cxgb4i_iscsi_transport = {
@@ -253,7 +254,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else if (is_t5(lldi->adapter_type)) {
struct cpl_t5_act_open_req *req =
(struct cpl_t5_act_open_req *)skb->head;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
@@ -281,7 +282,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
} else {
struct cpl_t6_act_open_req *req =
(struct cpl_t6_act_open_req *)skb->head;
- u32 isn = (prandom_u32() & ~7UL) - 1;
+ u32 isn = (get_random_u32() & ~7UL) - 1;
INIT_TP_WR(req, 0);
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 8c7d4dda4cf2..af281e271f88 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -328,7 +328,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
chba = cdev->hbas[i];
if (chba) {
cdev->hbas[i] = NULL;
- iscsi_host_remove(chba->shost);
+ iscsi_host_remove(chba->shost, false);
pci_dev_put(cdev->pdev);
iscsi_host_free(chba->shost);
}
@@ -1455,7 +1455,7 @@ void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
if (conn) {
log_debug(1 << CXGBI_DBG_SOCK,
"csk 0x%p, cid %d.\n", csk, conn->id);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, conn 0x%p.\n", csk, conn);
- if (unlikely(!conn || conn->suspend_rx)) {
+ if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+ "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
csk, conn, conn ? conn->id : 0xFF,
- conn ? conn->suspend_rx : 0xFF);
+ conn ? conn->flags : 0xFF);
return;
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index e7be95ee7d64..cd1324ec742d 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -132,7 +132,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
break;
case SISL_AFU_RC_OUT_OF_DATA_BUFS:
/* Retry */
- scp->result = (DID_ALLOC_FAILURE << 16);
+ scp->result = (DID_ERROR << 16);
break;
default:
scp->result = (DID_ERROR << 16);
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 244fc27215dc..631eda2d467e 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -16,6 +16,7 @@
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 01917b28cdb6..5c74dc7c2288 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -430,8 +430,8 @@ static int write_same16(struct scsi_device *sdev,
struct device *dev = &cfg->dev->dev;
const u32 s = ilog2(sdev->sector_size) - 9;
const u32 to = sdev->request_queue->rq_timeout;
- const u32 ws_limit = blk_queue_get_max_sectors(sdev->request_queue,
- REQ_OP_WRITE_SAME) >> s;
+ const u32 ws_limit =
+ sdev->request_queue->limits.max_write_zeroes_sectors >> s;
cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 9b8796c9e634..670a836a6ba1 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -946,7 +946,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* layer, invoke 'done' on completion
*
* @cmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
*
* Returns 1 if the adapter (host) is busy, else returns 0. One
* reason for an adapter to be busy is that the number
@@ -959,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
* and is expected to be held on return.
*
- **/
+ */
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
@@ -3315,9 +3314,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
/* Here is the info for Doug Gilbert's sg3 ... */
scsi_set_resid(cmd, srb->total_xfer_length);
- /* This may be interpreted by sb. or not ... */
- cmd->SCp.this_residual = srb->total_xfer_length;
- cmd->SCp.buffers_residual = 0;
if (debug_enabled(DBG_KG)) {
if (srb->total_xfer_length)
dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
@@ -3589,10 +3585,19 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
#endif
if (dcb->target_lun != 0) {
/* Copy settings */
- struct DeviceCtlBlk *p;
- list_for_each_entry(p, &acb->dcb_list, list)
- if (p->target_id == dcb->target_id)
+ struct DeviceCtlBlk *p = NULL, *iter;
+
+ list_for_each_entry(iter, &acb->dcb_list, list)
+ if (iter->target_id == dcb->target_id) {
+ p = iter;
break;
+ }
+
+ if (!p) {
+ kfree(dcb);
+ return NULL;
+ }
+
dprintkdbg(DBG_1,
"device_alloc: <%02i-%i> copy from <%02i-%i>\n",
dcb->target_id, dcb->target_lun,
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 37d06f993b76..610a51538f03 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -127,7 +127,7 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
u8 cdb[MAX_COMMAND_SIZE];
- int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the command. */
@@ -157,7 +157,7 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
u8 cdb[MAX_COMMAND_SIZE];
unsigned char stpg_data[8];
int stpg_len = 8;
- int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
/* Prepare the data buffer */
@@ -1172,9 +1172,8 @@ static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
case SCSI_ACCESS_STATE_OPTIMAL:
case SCSI_ACCESS_STATE_ACTIVE:
case SCSI_ACCESS_STATE_LBA:
- return BLK_STS_OK;
case SCSI_ACCESS_STATE_TRANSITIONING:
- return BLK_STS_AGAIN;
+ return BLK_STS_OK;
default:
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
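The req_flags retyping in this and the following device-handler files moves
the fail-fast flags from plain integer types to blk_opf_t, the sparse
__bitwise type used for block request operation bits, so static checkers can
flag int/flag mix-ups. The declaration pattern:

        blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                              REQ_FAILFAST_DRIVER;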
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index bd28ec6cfb72..2e21ab447873 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -239,7 +239,7 @@ static int send_trespass_cmd(struct scsi_device *sdev,
unsigned char cdb[MAX_COMMAND_SIZE];
int err, res = SCSI_DH_OK, len;
struct scsi_sense_hdr sshdr;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
if (csdev->flags & CLARIION_SHORT_TRESPASS) {
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 4a3f7831a2d6..0d2cfa60aa06 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -83,7 +83,7 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
unsigned char cmd[6] = { TEST_UNIT_READY };
struct scsi_sense_hdr sshdr;
int ret = SCSI_DH_OK, res;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
retry:
@@ -121,7 +121,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct scsi_device *sdev = h->sdev;
int res, rc = SCSI_DH_OK;
int retry_cnt = HP_SW_RETRIES;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
retry:
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 66652ab409cc..bf8754741f85 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -536,7 +536,7 @@ static void send_mode_select(struct work_struct *work)
unsigned char cdb[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sshdr;
unsigned int data_size;
- u64 req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
spin_lock(&ctlr->ms_lock);
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 6df60b31ecb0..a171ce6b70b2 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -52,7 +52,7 @@ static struct scsi_host_template dmx3191d_driver_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
static int dmx3191d_probe_one(struct pci_dev *pdev,
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
deleted file mode 100644
index bf0daeeb50a9..000000000000
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ /dev/null
@@ -1,441 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _SCSI_I2O_H
-#define _SCSI_I2O_H
-
-/* I2O kernel space accessible structures/APIs
- *
- * (c) Copyright 1999, 2000 Red Hat Software
- *
- *************************************************************************
- *
- * This header file defined the I2O APIs/structures for use by
- * the I2O kernel modules.
- */
-
-#ifdef __KERNEL__ /* This file to be included by kernel only */
-
-#include <linux/i2o-dev.h>
-
-#include <linux/notifier.h>
-#include <linux/atomic.h>
-
-
-/*
- * Tunable parameters first
- */
-
-/* How many different OSM's are we allowing */
-#define MAX_I2O_MODULES 64
-
-#define I2O_EVT_CAPABILITY_OTHER 0x01
-#define I2O_EVT_CAPABILITY_CHANGED 0x02
-
-#define I2O_EVT_SENSOR_STATE_CHANGED 0x01
-
-//#ifdef __KERNEL__ /* ioctl stuff only thing exported to users */
-
-#define I2O_MAX_MANAGERS 4
-
-/*
- * I2O Interface Objects
- */
-
-#include <linux/wait.h>
-typedef wait_queue_head_t adpt_wait_queue_head_t;
-#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
-typedef wait_queue_entry_t adpt_wait_queue_entry_t;
-
-/*
- * message structures
- */
-
-struct i2o_message
-{
- u8 version_offset;
- u8 flags;
- u16 size;
- u32 target_tid:12;
- u32 init_tid:12;
- u32 function:8;
- u32 initiator_context;
- /* List follows */
-};
-
-struct adpt_device;
-struct _adpt_hba;
-struct i2o_device
-{
- struct i2o_device *next; /* Chain */
- struct i2o_device *prev;
-
- char dev_name[8]; /* linux /dev name if available */
- i2o_lct_entry lct_data;/* Device LCT information */
- u32 flags;
- struct proc_dir_entry* proc_entry; /* /proc dir */
- struct adpt_device *owner;
- struct _adpt_hba *controller; /* Controlling IOP */
-};
-
-/*
- * Each I2O controller has one of these objects
- */
-
-struct i2o_controller
-{
- char name[16];
- int unit;
- int type;
- int enabled;
-
- struct notifier_block *event_notifer; /* Events */
- atomic_t users;
- struct i2o_device *devices; /* I2O device chain */
- struct i2o_controller *next; /* Controller chain */
-
-};
-
-/*
- * I2O System table entry
- */
-struct i2o_sys_tbl_entry
-{
- u16 org_id;
- u16 reserved1;
- u32 iop_id:12;
- u32 reserved2:20;
- u16 seg_num:12;
- u16 i2o_version:4;
- u8 iop_state;
- u8 msg_type;
- u16 frame_size;
- u16 reserved3;
- u32 last_changed;
- u32 iop_capabilities;
- u32 inbound_low;
- u32 inbound_high;
-};
-
-struct i2o_sys_tbl
-{
- u8 num_entries;
- u8 version;
- u16 reserved1;
- u32 change_ind;
- u32 reserved2;
- u32 reserved3;
- struct i2o_sys_tbl_entry iops[0];
-};
-
-/*
- * I2O classes / subclasses
- */
-
-/* Class ID and Code Assignments
- * (LCT.ClassID.Version field)
- */
-#define I2O_CLASS_VERSION_10 0x00
-#define I2O_CLASS_VERSION_11 0x01
-
-/* Class code names
- * (from v1.5 Table 6-1 Class Code Assignments.)
- */
-
-#define I2O_CLASS_EXECUTIVE 0x000
-#define I2O_CLASS_DDM 0x001
-#define I2O_CLASS_RANDOM_BLOCK_STORAGE 0x010
-#define I2O_CLASS_SEQUENTIAL_STORAGE 0x011
-#define I2O_CLASS_LAN 0x020
-#define I2O_CLASS_WAN 0x030
-#define I2O_CLASS_FIBRE_CHANNEL_PORT 0x040
-#define I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL 0x041
-#define I2O_CLASS_SCSI_PERIPHERAL 0x051
-#define I2O_CLASS_ATE_PORT 0x060
-#define I2O_CLASS_ATE_PERIPHERAL 0x061
-#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
-#define I2O_CLASS_FLOPPY_DEVICE 0x071
-#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
-#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
-#define I2O_CLASS_PEER_TRANSPORT 0x091
-
-/* Rest of 0x092 - 0x09f reserved for peer-to-peer classes
- */
-
-#define I2O_CLASS_MATCH_ANYCLASS 0xffffffff
-
-/* Subclasses
- */
-
-#define I2O_SUBCLASS_i960 0x001
-#define I2O_SUBCLASS_HDM 0x020
-#define I2O_SUBCLASS_ISM 0x021
-
-/* Operation functions */
-
-#define I2O_PARAMS_FIELD_GET 0x0001
-#define I2O_PARAMS_LIST_GET 0x0002
-#define I2O_PARAMS_MORE_GET 0x0003
-#define I2O_PARAMS_SIZE_GET 0x0004
-#define I2O_PARAMS_TABLE_GET 0x0005
-#define I2O_PARAMS_FIELD_SET 0x0006
-#define I2O_PARAMS_LIST_SET 0x0007
-#define I2O_PARAMS_ROW_ADD 0x0008
-#define I2O_PARAMS_ROW_DELETE 0x0009
-#define I2O_PARAMS_TABLE_CLEAR 0x000A
-
-/*
- * I2O serial number conventions / formats
- * (circa v1.5)
- */
-
-#define I2O_SNFORMAT_UNKNOWN 0
-#define I2O_SNFORMAT_BINARY 1
-#define I2O_SNFORMAT_ASCII 2
-#define I2O_SNFORMAT_UNICODE 3
-#define I2O_SNFORMAT_LAN48_MAC 4
-#define I2O_SNFORMAT_WAN 5
-
-/* Plus new in v2.0 (Yellowstone pdf doc)
- */
-
-#define I2O_SNFORMAT_LAN64_MAC 6
-#define I2O_SNFORMAT_DDM 7
-#define I2O_SNFORMAT_IEEE_REG64 8
-#define I2O_SNFORMAT_IEEE_REG128 9
-#define I2O_SNFORMAT_UNKNOWN2 0xff
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
-/*
- * Messaging API values
- */
-
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-
-#define I2O_PRIVATE_MSG 0xFF
-
-/*
- * Init Outbound Q status
- */
-
-#define I2O_CMD_OUTBOUND_INIT_IN_PROGRESS 0x01
-#define I2O_CMD_OUTBOUND_INIT_REJECTED 0x02
-#define I2O_CMD_OUTBOUND_INIT_FAILED 0x03
-#define I2O_CMD_OUTBOUND_INIT_COMPLETE 0x04
-
-/*
- * I2O Get Status State values
- */
-
-#define ADAPTER_STATE_INITIALIZING 0x01
-#define ADAPTER_STATE_RESET 0x02
-#define ADAPTER_STATE_HOLD 0x04
-#define ADAPTER_STATE_READY 0x05
-#define ADAPTER_STATE_OPERATIONAL 0x08
-#define ADAPTER_STATE_FAILED 0x10
-#define ADAPTER_STATE_FAULTED 0x11
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-/* Default is 1.5, FIXME: Need support for both 1.5 and 2.0 */
-#define I2OVERSION I2OVER15
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-
-#define TRL_OFFSET_5 (0x0050 | I2OVERSION)
-#define TRL_OFFSET_6 (0x0060 | I2OVERSION)
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_LAST 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-
-/* Special TID Assignments */
-
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-#define MSG_FRAME_SIZE 128
-#define NMBR_MSG_FRAMES 128
-
-#define MSG_POOL_SIZE 16384
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-
-#endif /* __KERNEL__ */
-
-#endif /* _SCSI_I2O_H */
diff --git a/drivers/scsi/dpt/dpti_ioctl.h b/drivers/scsi/dpt/dpti_ioctl.h
deleted file mode 100644
index 25e9251f8c78..000000000000
--- a/drivers/scsi/dpt/dpti_ioctl.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti_ioctl.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-/***************************************************************************
- * This file is generated from osd_unix.h *
- * *************************************************************************/
-
-#ifndef _dpti_ioctl_h
-#define _dpti_ioctl_h
-
-// IOCTL interface commands
-
-#ifndef _IOWR
-# define _IOWR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOW
-# define _IOW(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IOR
-# define _IOR(x,y,z) (((x)<<8)|y)
-#endif
-#ifndef _IO
-# define _IO(x,y) (((x)<<8)|y)
-#endif
-/* EATA PassThrough Command */
-#define EATAUSRCMD _IOWR('D',65,EATA_CP)
-/* Set Debug Level If Enabled */
-#define DPT_DEBUG _IOW('D',66,int)
-/* Get Signature Structure */
-#define DPT_SIGNATURE _IOR('D',67,dpt_sig_S)
-#if defined __bsdi__
-#define DPT_SIGNATURE_PACKED _IOR('D',67,dpt_sig_S_Packed)
-#endif
-/* Get Number Of DPT Adapters */
-#define DPT_NUMCTRLS _IOR('D',68,int)
-/* Get Adapter Info Structure */
-#define DPT_CTRLINFO _IOR('D',69,CtrlInfo)
-/* Get Statistics If Enabled */
-#define DPT_STATINFO _IO('D',70)
-/* Clear Stats If Enabled */
-#define DPT_CLRSTAT _IO('D',71)
-/* Get System Info Structure */
-#define DPT_SYSINFO _IOR('D',72,sysInfo_S)
-/* Set Timeout Value */
-#define DPT_TIMEOUT _IO('D',73)
-/* Get config Data */
-#define DPT_CONFIG _IO('D',74)
-/* Get Blink LED Code */
-#define DPT_BLINKLED _IOR('D',75,int)
-/* Get Statistical information (if available) */
-#define DPT_STATS_INFO _IOR('D',80,STATS_DATA)
-/* Clear the statistical information */
-#define DPT_STATS_CLEAR _IO('D',81)
-/* Get Performance metrics */
-#define DPT_PERF_INFO _IOR('D',82,dpt_perf_t)
-/* Send an I2O command */
-#define I2OUSRCMD _IO('D',76)
-/* Inform driver to re-acquire LCT information */
-#define I2ORESCANCMD _IO('D',77)
-/* Inform driver to reset adapter */
-#define I2ORESETCMD _IO('D',78)
-/* See if the target is mounted */
-#define DPT_TARGET_BUSY _IOR('D',79, TARGET_BUSY_T)
-
-
- /* Structure Returned From Get Controller Info */
-
-typedef struct {
- uCHAR state; /* Operational state */
- uCHAR id; /* Host adapter SCSI id */
- int vect; /* Interrupt vector number */
- int base; /* Base I/O address */
- int njobs; /* # of jobs sent to HA */
- int qdepth; /* Controller queue depth. */
- int wakebase; /* mpx wakeup base index. */
- uINT SGsize; /* Scatter/Gather list size. */
- unsigned heads; /* heads for drives on cntlr. */
- unsigned sectors; /* sectors for drives on cntlr. */
- uCHAR do_drive32; /* Flag for Above 16 MB Ability */
- uCHAR BusQuiet; /* SCSI Bus Quiet Flag */
- char idPAL[4]; /* 4 Bytes Of The ID Pal */
- uCHAR primary; /* 1 For Primary, 0 For Secondary */
- uCHAR eataVersion; /* EATA Version */
- uINT cpLength; /* EATA Command Packet Length */
- uINT spLength; /* EATA Status Packet Length */
- uCHAR drqNum; /* DRQ Index (0,5,6,7) */
- uCHAR flag1; /* EATA Flags 1 (Byte 9) */
- uCHAR flag2; /* EATA Flags 2 (Byte 30) */
-} CtrlInfo;
-
-typedef struct {
- uSHORT length; // Remaining length of this
- uSHORT drvrHBAnum; // Relative HBA # used by the driver
- uINT baseAddr; // Base I/O address
- uSHORT blinkState; // Blink LED state (0=Not in blink LED)
- uCHAR pciBusNum; // PCI Bus # (Optional)
- uCHAR pciDeviceNum; // PCI Device # (Optional)
- uSHORT hbaFlags; // Miscellaneous HBA flags
- uSHORT Interrupt; // Interrupt set for this device.
-# if (defined(_DPT_ARC))
- uINT baseLength;
- ADAPTER_OBJECT *AdapterObject;
- LARGE_INTEGER DmaLogicalAddress;
- PVOID DmaVirtualAddress;
- LARGE_INTEGER ReplyLogicalAddress;
- PVOID ReplyVirtualAddress;
-# else
- uINT reserved1; // Reserved for future expansion
- uINT reserved2; // Reserved for future expansion
- uINT reserved3; // Reserved for future expansion
-# endif
-} drvrHBAinfo_S;
-
-typedef struct TARGET_BUSY
-{
- uLONG channel;
- uLONG id;
- uLONG lun;
- uLONG isBusy;
-} TARGET_BUSY_T;
-
-#endif
-
diff --git a/drivers/scsi/dpt/dptsig.h b/drivers/scsi/dpt/dptsig.h
deleted file mode 100644
index a6644b332b53..000000000000
--- a/drivers/scsi/dpt/dptsig.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* BSDI dptsig.h,v 1.7 1998/06/03 19:15:00 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __DPTSIG_H_
-#define __DPTSIG_H_
-#ifdef _SINIX_ADDON
-#include "dpt.h"
-#endif
-/* DPT SIGNATURE SPEC AND HEADER FILE */
-/* Signature Version 1 (sorry no 'A') */
-
-/* to make sure we are talking the same size under all OS's */
-typedef unsigned char sigBYTE;
-typedef unsigned short sigWORD;
-typedef unsigned int sigINT;
-
-/*
- * use sigWORDLittleEndian for:
- * dsCapabilities
- * dsDeviceSupp
- * dsAdapterSupp
- * dsApplication
- * use sigLONGLittleEndian for:
- * dsOS
- * so that the sig can be standardised to Little Endian
- */
-#if (defined(_DPT_BIG_ENDIAN))
-# define sigWORDLittleEndian(x) ((((x)&0xFF)<<8)|(((x)>>8)&0xFF))
-# define sigLONGLittleEndian(x) \
- ((((x)&0xFF)<<24) | \
- (((x)&0xFF00)<<8) | \
- (((x)&0xFF0000L)>>8) | \
- (((x)&0xFF000000L)>>24))
-#else
-# define sigWORDLittleEndian(x) (x)
-# define sigLONGLittleEndian(x) (x)
-#endif
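-
-/*
- * Usage sketch (illustrative): the macros above are applied when the
- * signature words are stored, so the recorded layout is little endian
- * on every host.  "sig" is an assumed dpt_sig_S instance:
- *
- *	sig.dsCapabilities = sigWORDLittleEndian(sig.dsCapabilities);
- *	sig.dsOS = sigLONGLittleEndian(sig.dsOS);
- */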
-
-/* must make sure the structure is not word or double-word aligned */
-/* --------------------------------------------------------------- */
-/* Borland will ignore the following pragma: */
-/* Word alignment is OFF by default. If in the IDE, make */
-/* sure that Options | Compiler | Code Generation | Word Alignment */
-/* is not checked. If using BCC, do not use the -a option. */
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=mac68k
-#endif
-
-
-/* Current Signature Version - sigBYTE dsSigVersion; */
-/* ------------------------------------------------------------------ */
-#define SIG_VERSION 1
-
-/* Processor Family - sigBYTE dsProcessorFamily; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-/* What type of processor the file is meant to run on. */
-/* This will let us know whether to read sigWORDs as high/low or low/high. */
-#define PROC_INTEL 0x00 /* Intel 80x86/ia64 */
-#define PROC_MOTOROLA 0x01 /* Motorola 68K */
-#define PROC_MIPS4000 0x02 /* MIPS RISC 4000 */
-#define PROC_ALPHA 0x03 /* DEC Alpha */
-#define PROC_POWERPC 0x04 /* IBM Power PC */
-#define PROC_i960 0x05 /* Intel i960 */
-#define PROC_ULTRASPARC 0x06 /* SPARC processor */
-
-/* Specific Minimum Processor - sigBYTE dsProcessor; FLAG BITS */
-/* ------------------------------------------------------------------ */
-/* Different bit definitions dependent on processor_family */
-
-/* PROC_INTEL: */
-#define PROC_8086 0x01 /* Intel 8086 */
-#define PROC_286 0x02 /* Intel 80286 */
-#define PROC_386 0x04 /* Intel 80386 */
-#define PROC_486 0x08 /* Intel 80486 */
-#define PROC_PENTIUM 0x10 /* Intel 586 aka P5 aka Pentium */
-#define PROC_SEXIUM 0x20 /* Intel 686 aka P6 aka Pentium Pro or MMX */
-#define PROC_IA64 0x40 /* Intel IA64 processor */
-
-/* PROC_i960: */
-#define PROC_960RX 0x01 /* Intel 80960RC/RD */
-#define PROC_960HX 0x02 /* Intel 80960HA/HD/HT */
-
-/* PROC_MOTOROLA: */
-#define PROC_68000 0x01 /* Motorola 68000 */
-#define PROC_68010 0x02 /* Motorola 68010 */
-#define PROC_68020 0x04 /* Motorola 68020 */
-#define PROC_68030 0x08 /* Motorola 68030 */
-#define PROC_68040 0x10 /* Motorola 68040 */
-
-/* PROC_POWERPC */
-#define PROC_PPC601 0x01 /* PowerPC 601 */
-#define PROC_PPC603 0x02 /* PowerPC 603 */
-#define PROC_PPC604 0x04 /* PowerPC 604 */
-
-/* PROC_MIPS4000: */
-#define PROC_R4000 0x01 /* MIPS R4000 */
-
-/* Filetype - sigBYTE dsFiletype; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define FT_EXECUTABLE 0 /* Executable Program */
-#define FT_SCRIPT 1 /* Script/Batch File??? */
-#define FT_HBADRVR 2 /* HBA Driver */
-#define FT_OTHERDRVR 3 /* Other Driver */
-#define FT_IFS 4 /* Installable Filesystem Driver */
-#define FT_ENGINE 5 /* DPT Engine */
-#define FT_COMPDRVR 6 /* Compressed Driver Disk */
-#define FT_LANGUAGE 7 /* Foreign Language file */
-#define FT_FIRMWARE 8 /* Downloadable or actual Firmware */
-#define FT_COMMMODL 9 /* Communications Module */
-#define FT_INT13 10 /* INT 13 style HBA Driver */
-#define FT_HELPFILE 11 /* Help file */
-#define FT_LOGGER 12 /* Event Logger */
-#define FT_INSTALL 13 /* An Install Program */
-#define FT_LIBRARY 14 /* Storage Manager Real-Mode Calls */
-#define FT_RESOURCE 15 /* Storage Manager Resource File */
-#define FT_MODEM_DB 16 /* Storage Manager Modem Database */
-
-/* Filetype flags - sigBYTE dsFiletypeFlags; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define FTF_DLL 0x01 /* Dynamic Link Library */
-#define FTF_NLM 0x02 /* Netware Loadable Module */
-#define FTF_OVERLAYS 0x04 /* Uses overlays */
-#define FTF_DEBUG 0x08 /* Debug version */
-#define FTF_TSR 0x10 /* TSR */
-#define FTF_SYS 0x20 /* DOS Loadable driver */
-#define FTF_PROTECTED 0x40 /* Runs in protected mode */
-#define FTF_APP_SPEC 0x80 /* Application Specific */
-#define FTF_ROM (FTF_SYS|FTF_TSR) /* Special Case */
-
-/* OEM - sigBYTE dsOEM; DISTINCT VALUES */
-/* ------------------------------------------------------------------ */
-#define OEM_DPT 0 /* DPT */
-#define OEM_ATT 1 /* ATT */
-#define OEM_NEC 2 /* NEC */
-#define OEM_ALPHA 3 /* Alphatronix */
-#define OEM_AST 4 /* AST */
-#define OEM_OLIVETTI 5 /* Olivetti */
-#define OEM_SNI 6 /* Siemens/Nixdorf */
-#define OEM_SUN 7 /* SUN Microsystems */
-
-/* Operating System - sigLONG dsOS; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define OS_DOS 0x00000001 /* PC/MS-DOS */
-#define OS_WINDOWS 0x00000002 /* Microsoft Windows 3.x */
-#define OS_WINDOWS_NT 0x00000004 /* Microsoft Windows NT */
-#define OS_OS2M 0x00000008 /* OS/2 1.2.x,MS 1.3.0,IBM 1.3.x - Monolithic */
-#define OS_OS2L 0x00000010 /* Microsoft OS/2 1.301 - LADDR */
-#define OS_OS22x 0x00000020 /* IBM OS/2 2.x */
-#define OS_NW286 0x00000040 /* Novell NetWare 286 */
-#define OS_NW386 0x00000080 /* Novell NetWare 386 */
-#define OS_GEN_UNIX 0x00000100 /* Generic Unix */
-#define OS_SCO_UNIX 0x00000200 /* SCO Unix */
-#define OS_ATT_UNIX 0x00000400 /* ATT Unix */
-#define OS_UNIXWARE 0x00000800 /* USL Unix */
-#define OS_INT_UNIX 0x00001000 /* Interactive Unix */
-#define OS_SOLARIS 0x00002000 /* SunSoft Solaris */
-#define OS_QNX 0x00004000 /* QNX for Tom Moch */
-#define OS_NEXTSTEP 0x00008000 /* NeXTSTEP/OPENSTEP/MACH */
-#define OS_BANYAN 0x00010000 /* Banyan Vines */
-#define OS_OLIVETTI_UNIX 0x00020000 /* Olivetti Unix */
-#define OS_MAC_OS 0x00040000 /* Mac OS */
-#define OS_WINDOWS_95 0x00080000 /* Microsoft Windows '95 */
-#define OS_NW4x 0x00100000 /* Novell Netware 4.x */
-#define OS_BSDI_UNIX 0x00200000 /* BSDi Unix BSD/OS 2.0 and up */
-#define OS_AIX_UNIX 0x00400000 /* AIX Unix */
-#define OS_FREE_BSD 0x00800000 /* FreeBSD Unix */
-#define OS_LINUX 0x01000000 /* Linux */
-#define OS_DGUX_UNIX 0x02000000 /* Data General Unix */
-#define OS_SINIX_N 0x04000000 /* SNI SINIX-N */
-#define OS_PLAN9 0x08000000 /* ATT Plan 9 */
-#define OS_TSX 0x10000000 /* SNH TSX-32 */
-
-#define OS_OTHER 0x80000000 /* Other */
-
-/* Capabilities - sigWORD dsCapabilities; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define CAP_RAID0 0x0001 /* RAID-0 */
-#define CAP_RAID1 0x0002 /* RAID-1 */
-#define CAP_RAID3 0x0004 /* RAID-3 */
-#define CAP_RAID5 0x0008 /* RAID-5 */
-#define CAP_SPAN 0x0010 /* Spanning */
-#define CAP_PASS 0x0020 /* Provides passthrough */
-#define CAP_OVERLAP 0x0040 /* Passthrough supports overlapped commands */
-#define CAP_ASPI 0x0080 /* Supports ASPI Command Requests */
-#define CAP_ABOVE16MB 0x0100 /* ISA Driver supports greater than 16MB */
-#define CAP_EXTEND 0x8000 /* Extended info appears after description */
-#ifdef SNI_MIPS
-#define CAP_CACHEMODE 0x1000 /* dpt_force_cache is set in driver */
-#endif
-
-/* Devices Supported - sigWORD dsDeviceSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define DEV_DASD 0x0001 /* DASD (hard drives) */
-#define DEV_TAPE 0x0002 /* Tape drives */
-#define DEV_PRINTER 0x0004 /* Printers */
-#define DEV_PROC 0x0008 /* Processors */
-#define DEV_WORM 0x0010 /* WORM drives */
-#define DEV_CDROM 0x0020 /* CD-ROM drives */
-#define DEV_SCANNER 0x0040 /* Scanners */
-#define DEV_OPTICAL 0x0080 /* Optical Drives */
-#define DEV_JUKEBOX 0x0100 /* Jukebox */
-#define DEV_COMM 0x0200 /* Communications Devices */
-#define DEV_OTHER 0x0400 /* Other Devices */
-#define DEV_ALL 0xFFFF /* All SCSI Devices */
-
-/* Adapters Families Supported - sigWORD dsAdapterSupp; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define ADF_2001 0x0001 /* PM2001 */
-#define ADF_2012A 0x0002 /* PM2012A */
-#define ADF_PLUS_ISA 0x0004 /* PM2011,PM2021 */
-#define ADF_PLUS_EISA 0x0008 /* PM2012B,PM2022 */
-#define ADF_SC3_ISA 0x0010 /* PM2021 */
-#define ADF_SC3_EISA 0x0020 /* PM2022,PM2122, etc */
-#define ADF_SC3_PCI 0x0040 /* SmartCache III PCI */
-#define ADF_SC4_ISA 0x0080 /* SmartCache IV ISA */
-#define ADF_SC4_EISA 0x0100 /* SmartCache IV EISA */
-#define ADF_SC4_PCI 0x0200 /* SmartCache IV PCI */
-#define ADF_SC5_PCI 0x0400 /* Fifth Generation I2O products */
-/*
- * Combinations of products
- */
-#define ADF_ALL_2000 (ADF_2001|ADF_2012A)
-#define ADF_ALL_PLUS (ADF_PLUS_ISA|ADF_PLUS_EISA)
-#define ADF_ALL_SC3 (ADF_SC3_ISA|ADF_SC3_EISA|ADF_SC3_PCI)
-#define ADF_ALL_SC4 (ADF_SC4_ISA|ADF_SC4_EISA|ADF_SC4_PCI)
-#define ADF_ALL_SC5 (ADF_SC5_PCI)
-/* All EATA Caching Products */
-#define ADF_ALL_CACHE (ADF_ALL_PLUS|ADF_ALL_SC3|ADF_ALL_SC4)
-/* All EATA Bus Mastering Products */
-#define ADF_ALL_MASTER (ADF_2012A|ADF_ALL_CACHE)
-/* All EATA Adapter Products */
-#define ADF_ALL_EATA (ADF_2001|ADF_ALL_MASTER)
-#define ADF_ALL ADF_ALL_EATA
-
-/* Application - sigWORD dsApplication; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define APP_DPTMGR 0x0001 /* DPT Storage Manager */
-#define APP_ENGINE 0x0002 /* DPT Engine */
-#define APP_SYTOS 0x0004 /* Sytron Sytos Plus */
-#define APP_CHEYENNE 0x0008 /* Cheyenne ARCServe + ARCSolo */
-#define APP_MSCDEX 0x0010 /* Microsoft CD-ROM extensions */
-#define APP_NOVABACK 0x0020 /* NovaStor Novaback */
-#define APP_AIM 0x0040 /* Archive Information Manager */
-
-/* Requirements - sigBYTE dsRequirements; FLAG BITS */
-/* ------------------------------------------------------------------ */
-#define REQ_SMARTROM 0x01 /* Requires SmartROM to be present */
-#define REQ_DPTDDL 0x02 /* Requires DPTDDL.SYS to be loaded */
-#define REQ_HBA_DRIVER 0x04 /* Requires an HBA driver to be loaded */
-#define REQ_ASPI_TRAN 0x08 /* Requires an ASPI Transport Module */
-#define REQ_ENGINE 0x10 /* Requires a DPT Engine to be loaded */
-#define REQ_COMM_ENG 0x20 /* Requires a DPT Communications Engine */
-
-/*
- * You may adjust dsDescription_size with an override to a value less than
- * 50 so that the structure allocates less real space.
- */
-#if (!defined(dsDescription_size))
-# define dsDescription_size 50
-#endif
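-
-/*
- * Override sketch (illustrative): a consumer that only needs short
- * descriptions could do, before including this header:
- *
- *	#define dsDescription_size 20
- *	#include "dptsig.h"
- */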
-
-typedef struct dpt_sig {
- char dsSignature[6]; /* ALWAYS "dPtSiG" */
- sigBYTE dsSigVersion; /* signature version (currently 1) */
- sigBYTE dsProcessorFamily; /* what type of processor */
- sigBYTE dsProcessor; /* precise processor */
- sigBYTE dsFiletype; /* type of file */
- sigBYTE dsFiletypeFlags; /* flags to specify load type, etc. */
- sigBYTE dsOEM; /* OEM file was created for */
- sigINT dsOS; /* which Operating systems */
- sigWORD dsCapabilities; /* RAID levels, etc. */
- sigWORD dsDeviceSupp; /* Types of SCSI devices supported */
- sigWORD dsAdapterSupp; /* DPT adapter families supported */
- sigWORD dsApplication; /* applications file is for */
- sigBYTE dsRequirements; /* Other driver dependencies */
- sigBYTE dsVersion; /* 1 */
- sigBYTE dsRevision; /* 'J' */
- sigBYTE dsSubRevision; /* '9' ' ' if N/A */
- sigBYTE dsMonth; /* creation month */
- sigBYTE dsDay; /* creation day */
- sigBYTE dsYear; /* creation year since 1980 (1993=13) */
- /* description (NULL terminated) */
- char dsDescription[dsDescription_size];
-} dpt_sig_S;
-/* 32 bytes minimum - with no description. Put NULL at description[0] */
-/* 81 bytes maximum - with 49 character description plus NULL. */
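-
-/*
- * Size sketch (illustrative, assumes <stddef.h> and <string.h>): the
- * effective signature length is the fixed 31-byte header plus the
- * NUL-terminated description, matching the 32/81-byte bounds above:
- *
- *	size_t sig_len(const dpt_sig_S *sig)
- *	{
- *		return offsetof(dpt_sig_S, dsDescription)
- *			+ strlen(sig->dsDescription) + 1;
- *	}
- */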
-
-/* This line added at Roycroft's request */
-/* Microsoft's NT compiler gets confused if you do a pack and don't */
-/* restore it. */
-
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif
-/* For the Macintosh */
-#ifdef STRUCTALIGNMENTSUPPORTED
-#pragma options align=reset
-#endif
-
-#endif
diff --git a/drivers/scsi/dpt/osd_defs.h b/drivers/scsi/dpt/osd_defs.h
deleted file mode 100644
index de3ae5722982..000000000000
--- a/drivers/scsi/dpt/osd_defs.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* BSDI osd_defs.h,v 1.4 1998/06/03 19:14:58 karels Exp */
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef _OSD_DEFS_H
-#define _OSD_DEFS_H
-
-/*File - OSD_DEFS.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains the OS dependent defines. This file is included
- *in osd_util.h and provides the OS specific defines for that file.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/31/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
- /* Define the operating system */
-#if (defined(__linux__))
-# define _DPT_LINUX
-#elif (defined(__bsdi__))
-# define _DPT_BSDI
-#elif (defined(__FreeBSD__))
-# define _DPT_FREE_BSD
-#else
-# define _DPT_SCO
-#endif
-
-#if defined (ZIL_CURSES)
-#define _DPT_CURSES
-#else
-#define _DPT_MOTIF
-#endif
-
- /* Redefine 'far' to nothing - no far pointer type required in UNIX */
-#define far
-
- /* Define the mutually exclusive semaphore type */
-#define SEMAPHORE_T unsigned int *
- /* Define a handle to a DLL */
-#define DLL_HANDLE_T unsigned int *
-
-#endif
diff --git a/drivers/scsi/dpt/osd_util.h b/drivers/scsi/dpt/osd_util.h
deleted file mode 100644
index b2613c2eaac7..000000000000
--- a/drivers/scsi/dpt/osd_util.h
+++ /dev/null
@@ -1,358 +0,0 @@
-/* BSDI osd_util.h,v 1.8 1998/06/03 19:14:58 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __OSD_UTIL_H
-#define __OSD_UTIL_H
-
-/*File - OSD_UTIL.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains defines and function prototypes that are
- *operating system dependent. The resources defined in this file
- *are not specific to any particular application.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Doug Anderson
- *Date: 1/7/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Definitions - Defines & Constants ----------------------------------------- */
-
-/*----------------------------- */
-/* Operating system selections: */
-/*----------------------------- */
-
-/*#define _DPT_MSDOS */
-/*#define _DPT_WIN_3X */
-/*#define _DPT_WIN_4X */
-/*#define _DPT_WIN_NT */
-/*#define _DPT_NETWARE */
-/*#define _DPT_OS2 */
-/*#define _DPT_SCO */
-/*#define _DPT_UNIXWARE */
-/*#define _DPT_SOLARIS */
-/*#define _DPT_NEXTSTEP */
-/*#define _DPT_BANYAN */
-
-/*-------------------------------- */
-/* Include the OS specific defines */
-/*-------------------------------- */
-
-/*#define OS_SELECTION From Above List */
-/*#define SEMAPHORE_T ??? */
-/*#define DLL_HANDLE_T ??? */
-
-#if (defined(KERNEL) && (defined(__FreeBSD__) || defined(__bsdi__)))
-# include "i386/isa/dpt_osd_defs.h"
-#else
-# include "osd_defs.h"
-#endif
-
-#ifndef DPT_UNALIGNED
- #define DPT_UNALIGNED
-#endif
-
-#ifndef DPT_EXPORT
- #define DPT_EXPORT
-#endif
-
-#ifndef DPT_IMPORT
- #define DPT_IMPORT
-#endif
-
-#ifndef DPT_RUNTIME_IMPORT
- #define DPT_RUNTIME_IMPORT DPT_IMPORT
-#endif
-
-/*--------------------- */
-/* OS dependent defines */
-/*--------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_WIN_3X)
- #define _DPT_16_BIT
-#else
- #define _DPT_32_BIT
-#endif
-
-#if defined (_DPT_SCO) || defined (_DPT_UNIXWARE) || defined (_DPT_SOLARIS) || defined (_DPT_AIX) || defined (SNI_MIPS) || defined (_DPT_BSDI) || defined (_DPT_FREE_BSD) || defined(_DPT_LINUX)
- #define _DPT_UNIX
-#endif
-
-#if defined (_DPT_WIN_3x) || defined (_DPT_WIN_4X) || defined (_DPT_WIN_NT) \
- || defined (_DPT_OS2)
- #define _DPT_DLL_SUPPORT
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X) && !defined (_DPT_NETWARE)
- #define _DPT_PREEMPTIVE
-#endif
-
-#if !defined (_DPT_MSDOS) && !defined (_DPT_WIN_3X)
- #define _DPT_MULTI_THREADED
-#endif
-
-#if !defined (_DPT_MSDOS)
- #define _DPT_MULTI_TASKING
-#endif
-
- /* These exist for platforms that */
-	/* choke when accessing mis-aligned */
- /* data */
-#if defined (SNI_MIPS) || defined (_DPT_SOLARIS)
- #if defined (_DPT_BIG_ENDIAN)
- #if !defined (_DPT_STRICT_ALIGN)
- #define _DPT_STRICT_ALIGN
- #endif
- #endif
-#endif
-
- /* Determine if in C or C++ mode */
-#ifdef __cplusplus
- #define _DPT_CPP
-#else
- #define _DPT_C
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Under Solaris the compiler refuses to accept code like: */
-/* { {"DPT"}, 0, NULL .... }, */
-/* and complains about the {"DPT"} part by saying "cannot use { } */
-/* to initialize char*". */
-/* */
-/* By defining these ugly macros we can get around this and also */
-/* not have to copy and #ifdef large sections of code. I know that */
-/* these macros are *really* ugly, but they should help reduce */
-/* maintenance in the long run. */
-/* */
-/*-------------------------------------------------------------------*/
-#if !defined (DPTSQO)
- #if defined (_DPT_SOLARIS)
- #define DPTSQO
- #define DPTSQC
- #else
- #define DPTSQO {
- #define DPTSQC }
- #endif /* solaris */
-#endif /* DPTSQO */
-
-
-/*---------------------- */
-/* OS dependent typedefs */
-/*---------------------- */
-
-#if defined (_DPT_MSDOS) || defined (_DPT_SCO)
- #define BYTE unsigned char
- #define WORD unsigned short
-#endif
-
-#ifndef _DPT_TYPEDEFS
- #define _DPT_TYPEDEFS
- typedef unsigned char uCHAR;
- typedef unsigned short uSHORT;
- typedef unsigned int uINT;
- typedef unsigned long uLONG;
-
- typedef union {
- uCHAR u8[4];
- uSHORT u16[2];
- uLONG u32;
- } access_U;
-#endif
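-
-/*
- * Usage sketch (illustrative): access_U gives byte/word/long views of
- * the same 32-bit value, e.g. for hand-rolled byte swaps.  Note it
- * relies on uLONG being 4 bytes, as on the hosts this header targeted:
- *
- *	access_U u;
- *	u.u32 = 0x12345678;
- *	// u.u8[0] is 0x78 on little-endian hosts, 0x12 on big-endian.
- */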
-
-#if !defined (NULL)
- #define NULL 0
-#endif
-
-
-/*Prototypes - function ----------------------------------------------------- */
-
-#ifdef __cplusplus
- extern "C" { /* Declare all these functions as "C" functions */
-#endif
-
-/*------------------------ */
-/* Byte reversal functions */
-/*------------------------ */
-
- /* Reverses the byte ordering of a 2 byte variable */
-#if (!defined(osdSwap2))
- uSHORT osdSwap2(DPT_UNALIGNED uSHORT *);
-#endif // !osdSwap2
-
- /* Reverses the byte ordering of a 4 byte variable and shifts left 8 bits */
-#if (!defined(osdSwap3))
- uLONG osdSwap3(DPT_UNALIGNED uLONG *);
-#endif // !osdSwap3
-
-
-#ifdef _DPT_NETWARE
- #include "novpass.h" /* For DPT_Bswapl() prototype */
- /* Inline the byte swap */
- #ifdef __cplusplus
- inline uLONG osdSwap4(uLONG *inLong) {
- return *inLong = DPT_Bswapl(*inLong);
- }
- #else
- #define osdSwap4(inLong) DPT_Bswapl(inLong)
- #endif // cplusplus
-#else
- /* Reverses the byte ordering of a 4 byte variable */
-# if (!defined(osdSwap4))
- uLONG osdSwap4(DPT_UNALIGNED uLONG *);
-# endif // !osdSwap4
-
- /* The following functions ALWAYS swap regardless of the *
- * presence of DPT_BIG_ENDIAN */
-
- uSHORT trueSwap2(DPT_UNALIGNED uSHORT *);
- uLONG trueSwap4(DPT_UNALIGNED uLONG *);
-
-#endif // netware
-
-
-/*-------------------------------------*
- * Network order swap functions *
- * *
- * These functions/macros will be used *
- * by the structure insert()/extract() *
- * functions. *
- *
- * We will enclose all structure *
- * portability modifications inside *
- * #ifdefs. When we are ready, we *
- * will #define DPT_PORTABLE to begin *
- * using the modifications. *
- *-------------------------------------*/
-uLONG netSwap4(uLONG val);
-
-#if defined (_DPT_BIG_ENDIAN)
-
-// for big-endian we need to swap
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (((x) >> 8) | ((x) << 8))
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) netSwap4((x))
-#endif // NET_SWAP_4
-
-#else
-
-// for little-endian we don't need to do anything
-
-#ifndef NET_SWAP_2
-#define NET_SWAP_2(x) (x)
-#endif // NET_SWAP_2
-
-#ifndef NET_SWAP_4
-#define NET_SWAP_4(x) (x)
-#endif // NET_SWAP_4
-
-#endif // big endian
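-
-/*
- * Usage sketch (illustrative): fields headed for the portable structure
- * format go through NET_SWAP_*, so big-endian hosts swap while
- * little-endian hosts pass values through unchanged:
- *
- *	uSHORT wire16 = NET_SWAP_2(host16);
- *	uLONG wire32 = NET_SWAP_4(host32);
- */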
-
-
-
-/*----------------------------------- */
-/* Run-time loadable module functions */
-/*----------------------------------- */
-
- /* Loads the specified run-time loadable DLL */
-DLL_HANDLE_T osdLoadModule(uCHAR *);
- /* Unloads the specified run-time loadable DLL */
-uSHORT osdUnloadModule(DLL_HANDLE_T);
- /* Returns a pointer to a function inside a run-time loadable DLL */
-void * osdGetFnAddr(DLL_HANDLE_T,uCHAR *);
-
-/*--------------------------------------- */
-/* Mutually exclusive semaphore functions */
-/*--------------------------------------- */
-
- /* Create a named semaphore */
-SEMAPHORE_T osdCreateNamedSemaphore(char *);
- /* Create a mutually exclusive semaphore */
-SEMAPHORE_T osdCreateSemaphore(void);
- /* create an event semaphore */
-SEMAPHORE_T osdCreateEventSemaphore(void);
- /* create a named event semaphore */
-SEMAPHORE_T osdCreateNamedEventSemaphore(char *);
-
- /* Destroy the specified mutually exclusive semaphore object */
-uSHORT osdDestroySemaphore(SEMAPHORE_T);
- /* Request access to the specified mutually exclusive semaphore */
-uLONG osdRequestSemaphore(SEMAPHORE_T,uLONG);
- /* Release access to the specified mutually exclusive semaphore */
-uSHORT osdReleaseSemaphore(SEMAPHORE_T);
- /* wait for an event to happen */
-uLONG osdWaitForEventSemaphore(SEMAPHORE_T, uLONG);
- /* signal an event */
-uLONG osdSignalEventSemaphore(SEMAPHORE_T);
- /* reset the event */
-uLONG osdResetEventSemaphore(SEMAPHORE_T);
-
-/*----------------- */
-/* Thread functions */
-/*----------------- */
-
- /* Releases control to the task switcher in non-preemptive */
- /* multitasking operating systems. */
-void osdSwitchThreads(void);
-
- /* Starts a thread function */
-uLONG osdStartThread(void *,void *);
-
-/* what is my thread id */
-uLONG osdGetThreadID(void);
-
-/* wakes up the specified thread */
-void osdWakeThread(uLONG);
-
-/* osd sleep for x milliseconds */
-void osdSleep(uLONG);
-
-#define DPT_THREAD_PRIORITY_LOWEST 0x00
-#define DPT_THREAD_PRIORITY_NORMAL 0x01
-#define DPT_THREAD_PRIORITY_HIGHEST 0x02
-
-uCHAR osdSetThreadPriority(uLONG tid, uCHAR priority);
-
-#ifdef __cplusplus
-	} /* end the extern "C" declaration */
-#endif
-
-#endif /* osd_util_h */
diff --git a/drivers/scsi/dpt/sys_info.h b/drivers/scsi/dpt/sys_info.h
deleted file mode 100644
index a4aa1c31ff72..000000000000
--- a/drivers/scsi/dpt/sys_info.h
+++ /dev/null
@@ -1,417 +0,0 @@
-/* BSDI sys_info.h,v 1.6 1998/06/03 19:14:59 karels Exp */
-
-/*
- * Copyright (c) 1996-1999 Distributed Processing Technology Corporation
- * All rights reserved.
- *
- * Redistribution and use in source form, with or without modification, are
- * permitted provided that redistributions of source code must retain the
- * above copyright notice, this list of conditions and the following disclaimer.
- *
- * This software is provided `as is' by Distributed Processing Technology and
- * any express or implied warranties, including, but not limited to, the
- * implied warranties of merchantability and fitness for a particular purpose,
- * are disclaimed. In no event shall Distributed Processing Technology be
- * liable for any direct, indirect, incidental, special, exemplary or
- * consequential damages (including, but not limited to, procurement of
- * substitute goods or services; loss of use, data, or profits; or business
- * interruptions) however caused and on any theory of liability, whether in
- * contract, strict liability, or tort (including negligence or otherwise)
- * arising in any way out of the use of this driver software, even if advised
- * of the possibility of such damage.
- *
- */
-
-#ifndef __SYS_INFO_H
-#define __SYS_INFO_H
-
-/*File - SYS_INFO.H
- ****************************************************************************
- *
- *Description:
- *
- * This file contains structure definitions for the OS dependent
- *layer system information buffers.
- *
- *Copyright Distributed Processing Technology, Corp.
- * 140 Candace Dr.
- * Maitland, Fl. 32751 USA
- * Phone: (407) 830-5522 Fax: (407) 260-5366
- * All Rights Reserved
- *
- *Author: Don Kemper
- *Date: 5/10/94
- *
- *Editors:
- *
- *Remarks:
- *
- *
- *****************************************************************************/
-
-
-/*Include Files ------------------------------------------------------------- */
-
-#include "osd_util.h"
-
-#ifndef NO_PACK
-#if defined (_DPT_AIX)
-#pragma options align=packed
-#else
-#pragma pack(1)
-#endif /* aix */
-#endif // no pack
-
-
-/*struct - driveParam_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the drive parameters seen during
- *booting.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct driveParam_S {
-#else
- typedef struct {
-#endif
-
- uSHORT cylinders; /* Up to 1024 */
- uCHAR heads; /* Up to 255 */
- uCHAR sectors; /* Up to 63 */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } driveParam_S;
-#endif
-/*driveParam_S - end */
-
-
-/*struct - sysInfo_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the command system information that
- *should be returned by every OS dependent layer.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define SI_CMOS_Valid 0x0001
-#define SI_NumDrivesValid 0x0002
-#define SI_ProcessorValid 0x0004
-#define SI_MemorySizeValid 0x0008
-#define SI_DriveParamsValid 0x0010
-#define SI_SmartROMverValid 0x0020
-#define SI_OSversionValid 0x0040
-#define SI_OSspecificValid 0x0080 /* 1 if OS structure returned */
-#define SI_BusTypeValid 0x0100
-
-#define SI_ALL_VALID 0x0FFF /* All Std SysInfo is valid */
-#define SI_NO_SmartROM 0x8000
-
-/*busType - definitions */
-#define SI_ISA_BUS 0x00
-#define SI_MCA_BUS 0x01
-#define SI_EISA_BUS 0x02
-#define SI_PCI_BUS 0x04
-
-#ifdef __cplusplus
- struct sysInfo_S {
-#else
- typedef struct {
-#endif
-
- uCHAR drive0CMOS; /* CMOS Drive 0 Type */
- uCHAR drive1CMOS; /* CMOS Drive 1 Type */
- uCHAR numDrives; /* 0040:0075 contents */
- uCHAR processorFamily; /* Same as DPTSIG's definition */
- uCHAR processorType; /* Same as DPTSIG's definition */
- uCHAR smartROMMajorVersion;
- uCHAR smartROMMinorVersion; /* SmartROM version */
- uCHAR smartROMRevision;
- uSHORT flags; /* See bit definitions above */
- uSHORT conventionalMemSize; /* in KB */
- uINT extendedMemSize; /* in KB */
- uINT osType; /* Same as DPTSIG's definition */
- uCHAR osMajorVersion;
- uCHAR osMinorVersion; /* The OS version */
- uCHAR osRevision;
-#ifdef _SINIX_ADDON
-	uCHAR	busType;		/* See definitions above */
- uSHORT osSubRevision;
- uCHAR pad[2]; /* For alignment */
-#else
- uCHAR osSubRevision;
-	uCHAR	busType;		/* See definitions above */
- uCHAR pad[3]; /* For alignment */
-#endif
- driveParam_S drives[16]; /* SmartROM Logical Drives */
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } sysInfo_S;
-#endif
-/*sysInfo_S - end */
-
-
-/*struct - DOS_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *DOS workstation.
- *
- *---------------------------------------------------------------------------*/
-
-/*flags - bit definitions */
-#define DI_DOS_HIGH 0x01 /* DOS is loaded high */
-#define DI_DPMI_VALID 0x02 /* DPMI version is valid */
-
-#ifdef __cplusplus
- struct DOS_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR flags; /* See bit definitions above */
- uSHORT driverLocation; /* SmartROM BIOS address */
- uSHORT DOS_version;
- uSHORT DPMI_version;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } DOS_Info_S;
-#endif
-/*DOS_Info_S - end */
-
-
-/*struct - Netware_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Netware machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct Netware_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR driverName[13]; /* ie PM12NW31.DSK */
- uCHAR serverName[48];
- uCHAR netwareVersion; /* The Netware OS version */
- uCHAR netwareSubVersion;
- uCHAR netwareRevision;
- uSHORT maxConnections; /* Probably 250 or 1000 */
- uSHORT connectionsInUse;
- uSHORT maxVolumes;
- uCHAR unused;
- uCHAR SFTlevel;
- uCHAR TTSlevel;
-
- uCHAR clibMajorVersion; /* The CLIB.NLM version */
- uCHAR clibMinorVersion;
- uCHAR clibRevision;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } Netware_Info_S;
-#endif
-/*Netware_Info_S - end */
-
-
-/*struct - OS2_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *OS/2 machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct OS2_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } OS2_Info_S;
-#endif
-/*OS2_Info_S - end */
-
-
-/*struct - WinNT_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *Windows NT machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct WinNT_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } WinNT_Info_S;
-#endif
-/*WinNT_Info_S - end */
-
-
-/*struct - SCO_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to an
- *SCO UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct SCO_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } SCO_Info_S;
-#endif
-/*SCO_Info_S - end */
-
-
-/*struct - USL_Info_S - start
- *===========================================================================
- *
- *Description:
- *
- * This structure defines the system information specific to a
- *USL UNIX machine.
- *
- *---------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- struct USL_Info_S {
-#else
- typedef struct {
-#endif
-
- uCHAR something;
-
-#ifdef __cplusplus
-
-//---------- Portability Additions ----------- in sp_sinfo.cpp
-#ifdef DPT_PORTABLE
- uSHORT netInsert(dptBuffer_S *buffer);
- uSHORT netExtract(dptBuffer_S *buffer);
-#endif // DPT PORTABLE
-//--------------------------------------------
-
- };
-#else
- } USL_Info_S;
-#endif
-/*USL_Info_S - end */
-
-
- /* Restore default structure packing */
-#ifndef NO_UNPACK
-#if defined (_DPT_AIX)
-#pragma options align=reset
-#elif defined (UNPACK_FOUR)
-#pragma pack(4)
-#else
-#pragma pack()
-#endif /* aix */
-#endif // no unpack
-
-#endif // __SYS_INFO_H
-
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
deleted file mode 100644
index 93227c04ef59..000000000000
--- a/drivers/scsi/dpt_i2o.c
+++ /dev/null
@@ -1,3546 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/***************************************************************************
- dpti.c - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2000 by Adaptec
-
- July 30, 2001 First version being submitted
- for inclusion in the kernel. V2.4
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-/***************************************************************************
- * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
- - Support 2.6 kernel and DMA-mapping
- - ioctl fix for raid tools
- - use schedule_timeout in long long loop
- **************************************************************************/
-
-/*#define DEBUG 1 */
-/*#define UARTDELAY 1 */
-
-#include <linux/module.h>
-#include <linux/pgtable.h>
-
-MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
-MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
-
-////////////////////////////////////////////////////////////////
-
-#include <linux/ioctl.h> /* For SCSI-Passthrough */
-#include <linux/uaccess.h>
-
-#include <linux/stat.h>
-#include <linux/slab.h> /* for kmalloc() */
-#include <linux/pci.h> /* for PCI support */
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h> /* for udelay */
-#include <linux/interrupt.h>
-#include <linux/kernel.h> /* for printk */
-#include <linux/sched.h>
-#include <linux/reboot.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/mutex.h>
-
-#include <asm/processor.h> /* for boot_cpu_data */
-#include <asm/io.h> /* for virt_to_bus, etc. */
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-
-#include "dpt/dptsig.h"
-#include "dpti.h"
-
-/*============================================================================
- * Create a binary signature - this is read by dptsig
- * Needed for our management apps
- *============================================================================
- */
-static DEFINE_MUTEX(adpt_mutex);
-static dpt_sig_S DPTI_sig = {
- {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
-#ifdef __i386__
- PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
-#elif defined(__ia64__)
- PROC_INTEL, PROC_IA64,
-#elif defined(__sparc__)
- PROC_ULTRASPARC, PROC_ULTRASPARC,
-#elif defined(__alpha__)
- PROC_ALPHA, PROC_ALPHA,
-#else
- (-1),(-1),
-#endif
- FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
- ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
- DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
-};
-
-
-
-
-/*============================================================================
- * Globals
- *============================================================================
- */
-
-static DEFINE_MUTEX(adpt_configuration_lock);
-
-static struct i2o_sys_tbl *sys_tbl;
-static dma_addr_t sys_tbl_pa;
-static int sys_tbl_ind;
-static int sys_tbl_len;
-
-static adpt_hba* hba_chain = NULL;
-static int hba_count = 0;
-
-static struct class *adpt_sysfs_class;
-
-static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
-#endif
-
-static const struct file_operations adpt_fops = {
- .unlocked_ioctl = adpt_unlocked_ioctl,
- .open = adpt_open,
- .release = adpt_close,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_adpt_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-/* Structures and definitions for synchronous message posting.
- * See adpt_i2o_post_wait() for description
- */
-struct adpt_i2o_post_wait_data
-{
- int status;
- u32 id;
- adpt_wait_queue_head_t *wq;
- struct adpt_i2o_post_wait_data *next;
-};
-
-static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
-static u32 adpt_post_wait_id = 0;
-static DEFINE_SPINLOCK(adpt_post_wait_lock);
-
-
-/*============================================================================
- * Functions
- *============================================================================
- */
-
-static inline int dpt_dma64(adpt_hba *pHba)
-{
- return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
-}
-
-static inline u32 dma_high(dma_addr_t addr)
-{
- return upper_32_bits(addr);
-}
-
-static inline u32 dma_low(dma_addr_t addr)
-{
- return (u32)addr;
-}
-
-static u8 adpt_read_blink_led(adpt_hba* host)
-{
- if (host->FwDebugBLEDflag_P) {
- if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
- return readb(host->FwDebugBLEDvalue_P);
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Scsi host template interface functions
- *============================================================================
- */
-
-#ifdef MODULE
-static struct pci_device_id dptids[] = {
- { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- { 0, }
-};
-#endif
-
-MODULE_DEVICE_TABLE(pci,dptids);
-
-static int adpt_detect(struct scsi_host_template* sht)
-{
- struct pci_dev *pDev = NULL;
- adpt_hba *pHba;
- adpt_hba *next;
-
- PINFO("Detecting Adaptec I2O RAID controllers...\n");
-
-	/* search for all Adaptec I2O RAID cards */
- while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
- if(pDev->device == PCI_DPT_DEVICE_ID ||
- pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
- if(adpt_install_hba(sht, pDev) ){
- PERROR("Could not Init an I2O RAID device\n");
- PERROR("Will not try to detect others.\n");
- return hba_count-1;
- }
- pci_dev_get(pDev);
- }
- }
-
- /* In INIT state, Activate IOPs */
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
-		// Activate does get status, init outbound, and get hrt
- if (adpt_i2o_activate_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- }
- }
-
-
- /* Active IOPs in HOLD state */
-
-rebuild_sys_tab:
- if (hba_chain == NULL)
- return 0;
-
- /*
- * If build_sys_table fails, we kill everything and bail
- * as we can't init the IOPs w/o a system table
- */
- if (adpt_i2o_build_sys_table() < 0) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
-
- PDEBUG("HBA's in HOLD state\n");
-
-	/* If an IOP doesn't come online, we need to rebuild the system table */
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (adpt_i2o_online_hba(pHba) < 0) {
- adpt_i2o_delete_hba(pHba);
- goto rebuild_sys_tab;
- }
- }
-
- /* Active IOPs now in OPERATIONAL state */
- PDEBUG("HBA's in OPERATIONAL state\n");
-
- printk("dpti: If you have a lot of devices this could take a few minutes.\n");
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
- if (adpt_i2o_lct_get(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
-
- if (adpt_i2o_parse_lct(pHba) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- adpt_inquiry(pHba);
- }
-
- adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
- if (IS_ERR(adpt_sysfs_class)) {
- printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
- adpt_sysfs_class = NULL;
- }
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- if (adpt_scsi_host_alloc(pHba, sht) < 0){
- adpt_i2o_delete_hba(pHba);
- continue;
- }
- pHba->initialized = TRUE;
- pHba->state &= ~DPTI_STATE_RESET;
- if (adpt_sysfs_class) {
- struct device *dev = device_create(adpt_sysfs_class,
- NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
- "dpti%d", pHba->unit);
- if (IS_ERR(dev)) {
- printk(KERN_WARNING"dpti%d: unable to "
- "create device in dpt_i2o class\n",
- pHba->unit);
- }
- }
- }
-
- // Register our control device node
- // nodes will need to be created in /dev to access this
- // the nodes can not be created from within the driver
- if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
- adpt_i2o_sys_shutdown();
- return 0;
- }
- return hba_count;
-}
-
-
-static void adpt_release(adpt_hba *pHba)
-{
- struct Scsi_Host *shost = pHba->host;
-
- scsi_remove_host(shost);
-// adpt_i2o_quiesce_hba(pHba);
- adpt_i2o_delete_hba(pHba);
- scsi_host_put(shost);
-}
-
-
-static void adpt_inquiry(adpt_hba* pHba)
-{
- u32 msg[17];
- u32 *mptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- u32 len;
- u32 reqlen;
- u8* buf;
- dma_addr_t addr;
- u8 scb[16];
- s32 rcode;
-
- memset(msg, 0, sizeof(msg));
- buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
- if(!buf){
- printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
- return;
- }
- memset((void*)buf, 0, 36);
-
- len = 36;
- direction = 0x00000000;
- scsidir =0x40000000; // DATA IN (iop<--dev)
-
- if (dpt_dma64(pHba))
- reqlen = 17; // SINGLE SGE, 64 bit
- else
- reqlen = 14; // SINGLE SGE, 32 bit
- /* Stick the headers on */
- msg[0] = reqlen<<16 | SGL_OFFSET_12;
- msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
- msg[2] = 0;
- msg[3] = 0;
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
- msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
- /* Direction, disconnect ok | sense data | simple queue , CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
-
- mptr=msg+7;
-
- memset(scb, 0, sizeof(scb));
- // Write SCSI command into the message - always 16 byte block
- scb[0] = INQUIRY;
- scb[1] = 0;
- scb[2] = 0;
- scb[3] = 0;
- scb[4] = 36;
- scb[5] = 0;
- // Don't care about the rest of scb
-
- memcpy(mptr, scb, sizeof(scb));
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
-
- /* Now fill in the SGList and command */
- *lenptr = len;
- if (dpt_dma64(pHba)) {
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = dma_low(addr);
- *mptr++ = dma_high(addr);
- } else {
- *mptr++ = 0xD0000000|direction|len;
- *mptr++ = addr;
- }
-
-	// Send it on its way
- rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
- if (rcode != 0) {
- sprintf(pHba->detail, "Adaptec I2O RAID");
- printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
- if (rcode != -ETIME && rcode != -EINTR)
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- } else {
- memset(pHba->detail, 0, sizeof(pHba->detail));
- memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
- memcpy(&(pHba->detail[16]), " Model: ", 8);
- memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
- memcpy(&(pHba->detail[40]), " FW: ", 4);
- memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
- pHba->detail[48] = '\0'; /* precautionary */
- dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
- }
- adpt_i2o_status_get(pHba);
- return ;
-}
-
-
-static int adpt_slave_configure(struct scsi_device * device)
-{
- struct Scsi_Host *host = device->host;
-
- if (host->can_queue && device->tagged_supported) {
- scsi_change_queue_depth(device,
- host->can_queue - 1);
- }
- return 0;
-}
-
-static int adpt_queue_lck(struct scsi_cmnd *cmd)
-{
- adpt_hba* pHba = NULL;
- struct adpt_device* pDev = NULL; /* dpt per device information */
-
- /*
- * SCSI REQUEST_SENSE commands will be executed automatically by the
- * Host Adapter for any errors, so they should not be executed
-	 * explicitly unless the Sense Data is zero, indicating that no error
- * occurred.
- */
-
- if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
- cmd->result = (DID_OK << 16);
- scsi_done(cmd);
- return 0;
- }
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- if (!pHba) {
- return FAILED;
- }
-
- rmb();
- if ((pHba->state) & DPTI_STATE_RESET)
- return SCSI_MLQUEUE_HOST_BUSY;
-
-	// TODO if the cmd->device is offline then I may need to issue a bus rescan
- // followed by a get_lct to see if the device is there anymore
- if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
- /*
- * First command request for this device. Set up a pointer
- * to the device structure. This should be a TEST_UNIT_READY
- * command from scan_scsis_single.
- */
- if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
- // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
- // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
- cmd->result = (DID_NO_CONNECT << 16);
- scsi_done(cmd);
- return 0;
- }
- cmd->device->hostdata = pDev;
- }
- pDev->pScsi_dev = cmd->device;
-
- /*
- * If we are being called from when the device is being reset,
- * delay processing of the command until later.
- */
- if (pDev->state & DPTI_DEV_RESET ) {
- return FAILED;
- }
- return adpt_scsi_to_i2o(pHba, cmd, pDev);
-}
-
-static DEF_SCSI_QCMD(adpt_queue)
-
-static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
- sector_t capacity, int geom[])
-{
- int heads=-1;
- int sectors=-1;
- int cylinders=-1;
-
- // *** First lets set the default geometry ****
-
-	// If the capacity is less than 0x2000
- if (capacity < 0x2000 ) { // floppy
- heads = 18;
- sectors = 2;
- }
- // else if between 0x2000 and 0x20000
- else if (capacity < 0x20000) {
- heads = 64;
- sectors = 32;
- }
- // else if between 0x20000 and 0x40000
- else if (capacity < 0x40000) {
- heads = 65;
- sectors = 63;
- }
-	// else if between 0x40000 and 0x80000
- else if (capacity < 0x80000) {
- heads = 128;
- sectors = 63;
- }
- // else if greater than 0x80000
- else {
- heads = 255;
- sectors = 63;
- }
- cylinders = sector_div(capacity, heads * sectors);
-
- // Special case if CDROM
- if(sdev->type == 5) { // CDROM
- heads = 252;
- sectors = 63;
- cylinders = 1111;
- }
-
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
-
- PDEBUG("adpt_bios_param: exit\n");
- return 0;
-}
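-
-/*
- * Worked example (illustrative): a 1 GiB disk has 0x200000 512-byte
- * sectors, which lands in the ">= 0x80000" bucket above, giving 255
- * heads and 63 sectors; the matching cylinder count would be
- * 2097152 / (255 * 63) = 130.  Note that sector_div() divides
- * 'capacity' in place and returns the remainder.
- */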
-
-
-static const char *adpt_info(struct Scsi_Host *host)
-{
- adpt_hba* pHba;
-
- pHba = (adpt_hba *) host->hostdata[0];
- return (char *) (pHba->detail);
-}
-
-static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
-{
- struct adpt_device* d;
- int id;
- int chan;
- adpt_hba* pHba;
- int unit;
-
- // Find HBA (host bus adapter) we are looking for
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->host == host) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return 0;
- }
- host = pHba->host;
-
- seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
- seq_printf(m, "%s\n", pHba->detail);
- seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
- pHba->host->host_no, pHba->name, host->irq);
- seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
- host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
-
- seq_puts(m, "Devices:\n");
- for(chan = 0; chan < MAX_CHANNEL; chan++) {
- for(id = 0; id < MAX_ID; id++) {
- d = pHba->channel[chan].device[id];
- while(d) {
- seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
- seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
-
- unit = d->pI2o_dev->lct_data.tid;
- seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
- unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
- scsi_device_online(d->pScsi_dev)? "online":"offline");
- d = d->next_lun;
- }
- }
- }
- return 0;
-}
-
-/*
- * Turn a pointer to ioctl reply data into an u32 'context'
- */
-static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
-{
-#if BITS_PER_LONG == 32
- return (u32)(unsigned long)reply;
-#else
- ulong flags = 0;
- u32 nr, i;
-
- spin_lock_irqsave(pHba->host->host_lock, flags);
- nr = ARRAY_SIZE(pHba->ioctl_reply_context);
- for (i = 0; i < nr; i++) {
- if (pHba->ioctl_reply_context[i] == NULL) {
- pHba->ioctl_reply_context[i] = reply;
- break;
- }
- }
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- if (i >= nr) {
- printk(KERN_WARNING"%s: Too many outstanding "
- "ioctl commands\n", pHba->name);
- return (u32)-1;
- }
-
- return i;
-#endif
-}
-
-/*
- * Go from an u32 'context' to a pointer to ioctl reply data.
- */
-static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
-{
-#if BITS_PER_LONG == 32
- return (void *)(unsigned long)context;
-#else
- void *p = pHba->ioctl_reply_context[context];
- pHba->ioctl_reply_context[context] = NULL;
-
- return p;
-#endif
-}
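-
-/*
- * Round-trip sketch (illustrative): on 64-bit kernels the reply buffer
- * pointer is parked in pHba->ioctl_reply_context[] and only the slot
- * index travels through the 32-bit message context:
- *
- *	u32 ctx = adpt_ioctl_to_context(pHba, reply);
- *	if (ctx != (u32)-1) {
- *		// ... post the message; later, in the reply path:
- *		reply = adpt_ioctl_from_context(pHba, ctx);
- *	}
- */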
-
-/*===========================================================================
- * Error Handling routines
- *===========================================================================
- */
-
-static int adpt_abort(struct scsi_cmnd * cmd)
-{
- adpt_hba* pHba = NULL; /* host bus adapter structure */
- struct adpt_device* dptdevice; /* dpt per device information */
- u32 msg[5];
- int rcode;
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
- if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
- printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
- return FAILED;
- }
-
- memset(msg, 0, sizeof(msg));
- msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
- msg[2] = 0;
- msg[3]= 0;
-	/* Add 1 to avoid firmware treating it as an invalid command */
- msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
- return SUCCESS;
-}
-
-
-#define I2O_DEVICE_RESET 0x27
-// This is the same for BLK and SCSI devices
-// NOTE this is wrong in the i2o.h definitions
-// This is not currently supported by our adapter but we issue it anyway
-static int adpt_device_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
- int old_state;
- struct adpt_device* d = cmd->device->hostdata;
-
- pHba = (void*) cmd->device->host->hostdata[0];
- printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
- if (!d) {
- printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
- return FAILED;
- }
- memset(msg, 0, sizeof(msg));
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
- msg[2] = 0;
- msg[3] = 0;
-
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- old_state = d->state;
- d->state |= DPTI_DEV_RESET;
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- d->state = old_state;
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- if(rcode == -EOPNOTSUPP ){
- printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
- return FAILED;
- }
- printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
- return SUCCESS;
- }
-}
-
-
-#define I2O_HBA_BUS_RESET 0x87
-// This version of bus reset is called by the eh_error handler
-static int adpt_bus_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- u32 msg[4];
- u32 rcode;
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- memset(msg, 0, sizeof(msg));
- printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
- msg[2] = 0;
- msg[3] = 0;
- if (pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
- if (pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (rcode != 0) {
- printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
- return FAILED;
- } else {
- printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
- return SUCCESS;
- }
-}
-
-// This version of reset is called by the eh_error_handler
-static int __adpt_reset(struct scsi_cmnd* cmd)
-{
- adpt_hba* pHba;
- int rcode;
- char name[32];
-
- pHba = (adpt_hba*)cmd->device->host->hostdata[0];
- strncpy(name, pHba->name, sizeof(name));
- printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
- rcode = adpt_hba_reset(pHba);
- if(rcode == 0){
- printk(KERN_WARNING"%s: HBA reset complete\n", name);
- return SUCCESS;
- } else {
- printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
- return FAILED;
- }
-}
-
-static int adpt_reset(struct scsi_cmnd* cmd)
-{
- int rc;
-
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __adpt_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
-
- return rc;
-}
-
-// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
-static int adpt_hba_reset(adpt_hba* pHba)
-{
- int rcode;
-
- pHba->state |= DPTI_STATE_RESET;
-
- // Activate does get status, init outbound, and get hrt
- if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
- printk(KERN_ERR "%s: Could not activate\n", pHba->name);
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_build_sys_table()) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in HOLD state\n",pHba->name);
-
- if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
-
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
-
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
- adpt_i2o_delete_hba(pHba);
- return rcode;
- }
- pHba->state &= ~DPTI_STATE_RESET;
-
- scsi_host_complete_all_commands(pHba->host, DID_RESET);
- return 0; /* return success */
-}
-
-/*===========================================================================
- *
- *===========================================================================
- */
-
-
-static void adpt_i2o_sys_shutdown(void)
-{
- adpt_hba *pHba, *pNext;
- struct adpt_i2o_post_wait_data *p1, *old;
-
- printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
- printk(KERN_INFO " This could take a few minutes if there are many devices attached\n");
- /* Delete all IOPs from the controller chain */
- /* They should have already been released by the
- * scsi-core
- */
- for (pHba = hba_chain; pHba; pHba = pNext) {
- pNext = pHba->next;
- adpt_i2o_delete_hba(pHba);
- }
-
- /* Remove any timedout entries from the wait queue. */
-// spin_lock_irqsave(&adpt_post_wait_lock, flags);
- /* Nothing should be outstanding at this point so just
- * free them
- */
- for(p1 = adpt_post_wait_queue; p1;) {
- old = p1;
- p1 = p1->next;
- kfree(old);
- }
-// spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
- adpt_post_wait_queue = NULL;
-
- printk(KERN_INFO "Adaptec I2O controllers down.\n");
-}
-
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
-{
-
- adpt_hba* pHba = NULL;
- adpt_hba* p = NULL;
- ulong base_addr0_phys = 0;
- ulong base_addr1_phys = 0;
- u32 hba_map0_area_size = 0;
- u32 hba_map1_area_size = 0;
- void __iomem *base_addr_virt = NULL;
- void __iomem *msg_addr_virt = NULL;
- int dma64 = 0;
-
- int raptorFlag = FALSE;
-
- if(pci_enable_device(pDev)) {
- return -EINVAL;
- }
-
- if (pci_request_regions(pDev, "dpt_i2o")) {
- PERROR("dpti: adpt_config_hba: pci request region failed\n");
- return -EINVAL;
- }
-
- pci_set_master(pDev);
-
- /*
- * See if we should enable dma64 mode.
- */
- if (sizeof(dma_addr_t) > 4 &&
- dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
- dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
- dma64 = 1;
-
- if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
- return -EINVAL;
-
- /* adapter only supports message blocks below 4GB */
- dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));
-
- base_addr0_phys = pci_resource_start(pDev,0);
- hba_map0_area_size = pci_resource_len(pDev,0);
-
- // Check if standard PCI card or single BAR Raptor
- if(pDev->device == PCI_DPT_DEVICE_ID){
- if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
- // Raptor card with this device id needs 4M
- hba_map0_area_size = 0x400000;
- } else { // Not Raptor - it is a PCI card
- if(hba_map0_area_size > 0x100000 ){
- hba_map0_area_size = 0x100000;
- }
- }
- } else {// Raptor split BAR config
- // Use BAR1 in this configuration
- base_addr1_phys = pci_resource_start(pDev,1);
- hba_map1_area_size = pci_resource_len(pDev,1);
- raptorFlag = TRUE;
- }
-
-#if BITS_PER_LONG == 64
- /*
- * The original Adaptec 64 bit driver has this comment here:
- * "x86_64 machines need more optimal mappings"
- *
- * I assume some HBAs report ridiculously large mappings
- * and we need to limit them on platforms with IOMMUs.
- */
- if (raptorFlag == TRUE) {
- if (hba_map0_area_size > 128)
- hba_map0_area_size = 128;
- if (hba_map1_area_size > 524288)
- hba_map1_area_size = 524288;
- } else {
- if (hba_map0_area_size > 524288)
- hba_map0_area_size = 524288;
- }
-#endif
-
- base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
- if (!base_addr_virt) {
- pci_release_regions(pDev);
- PERROR("dpti: adpt_config_hba: io remap failed\n");
- return -EINVAL;
- }
-
- if(raptorFlag == TRUE) {
- msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
- if (!msg_addr_virt) {
- PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -EINVAL;
- }
- } else {
- msg_addr_virt = base_addr_virt;
- }
-
- // Allocate and zero the data structure
- pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
- if (!pHba) {
- if (msg_addr_virt != base_addr_virt)
- iounmap(msg_addr_virt);
- iounmap(base_addr_virt);
- pci_release_regions(pDev);
- return -ENOMEM;
- }
-
- mutex_lock(&adpt_configuration_lock);
-
- if(hba_chain != NULL){
- for(p = hba_chain; p->next; p = p->next);
- p->next = pHba;
- } else {
- hba_chain = pHba;
- }
- pHba->next = NULL;
- pHba->unit = hba_count;
- sprintf(pHba->name, "dpti%d", hba_count);
- hba_count++;
-
- mutex_unlock(&adpt_configuration_lock);
-
- pHba->pDev = pDev;
- pHba->base_addr_phys = base_addr0_phys;
-
- // Set up the Virtual Base Address of the I2O Device
- pHba->base_addr_virt = base_addr_virt;
- pHba->msg_addr_virt = msg_addr_virt;
- pHba->irq_mask = base_addr_virt+0x30;
- pHba->post_port = base_addr_virt+0x40;
- pHba->reply_port = base_addr_virt+0x44;
-
- pHba->hrt = NULL;
- pHba->lct = NULL;
- pHba->lct_size = 0;
- pHba->status_block = NULL;
- pHba->post_count = 0;
- pHba->state = DPTI_STATE_RESET;
- pHba->pDev = pDev;
- pHba->devices = NULL;
- pHba->dma64 = dma64;
-
- // Initializing the spinlocks
- spin_lock_init(&pHba->state_lock);
- spin_lock_init(&adpt_post_wait_lock);
-
- if(raptorFlag == 0){
- printk(KERN_INFO "Adaptec I2O RAID controller"
- " %d at %p size=%x irq=%d%s\n",
- hba_count-1, base_addr_virt,
- hba_map0_area_size, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- } else {
- printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
- hba_count-1, pDev->irq,
- dma64 ? " (64-bit DMA)" : "");
- printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
- printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
- }
-
- if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
- printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
- adpt_i2o_delete_hba(pHba);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static void adpt_i2o_delete_hba(adpt_hba* pHba)
-{
- adpt_hba* p1;
- adpt_hba* p2;
- struct i2o_device* d;
- struct i2o_device* next;
- int i;
- int j;
- struct adpt_device* pDev;
- struct adpt_device* pNext;
-
-
- mutex_lock(&adpt_configuration_lock);
- if(pHba->host){
- free_irq(pHba->host->irq, pHba);
- }
- p2 = NULL;
- for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
- if(p1 == pHba) {
- if(p2) {
- p2->next = p1->next;
- } else {
- hba_chain = p1->next;
- }
- break;
- }
- }
-
- hba_count--;
- mutex_unlock(&adpt_configuration_lock);
-
- iounmap(pHba->base_addr_virt);
- pci_release_regions(pHba->pDev);
- if(pHba->msg_addr_virt != pHba->base_addr_virt){
- iounmap(pHba->msg_addr_virt);
- }
- if(pHba->FwDebugBuffer_P)
- iounmap(pHba->FwDebugBuffer_P);
- if(pHba->hrt) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
- pHba->hrt, pHba->hrt_pa);
- }
- if(pHba->lct) {
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- }
- if(pHba->status_block) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
- pHba->status_block, pHba->status_block_pa);
- }
- if(pHba->reply_pool) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- for(d = pHba->devices; d ; d = next){
- next = d->next;
- kfree(d);
- }
- for(i = 0 ; i < pHba->top_scsi_channel ; i++){
- for(j = 0; j < MAX_ID; j++){
- if(pHba->channel[i].device[j] != NULL){
- for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
- pNext = pDev->next_lun;
- kfree(pDev);
- }
- }
- }
- }
- pci_dev_put(pHba->pDev);
- if (adpt_sysfs_class)
- device_destroy(adpt_sysfs_class,
- MKDEV(DPTI_I2O_MAJOR, pHba->unit));
- kfree(pHba);
-
- if(hba_count <= 0){
- unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
- if (adpt_sysfs_class) {
- class_destroy(adpt_sysfs_class);
- adpt_sysfs_class = NULL;
- }
- }
-}
-
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
-{
- struct adpt_device* d;
-
- if (chan >= MAX_CHANNEL)
- return NULL;
-
- d = pHba->channel[chan].device[id];
- if(!d || d->tid == 0) {
- return NULL;
- }
-
- /* If it is the only lun at that address then this should match */
- if(d->scsi_lun == lun){
- return d;
- }
-
- /* else we need to look through all the luns */
- for(d=d->next_lun ; d ; d = d->next_lun){
- if(d->scsi_lun == lun){
- return d;
- }
- }
- return NULL;
-}
-
-
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
-{
- // I used my own version of the WAIT_QUEUE_HEAD
- // to handle some version differences
- // When embedded in the kernel this could go back to the vanilla one
- ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
- int status = 0;
- ulong flags = 0;
- struct adpt_i2o_post_wait_data *p1, *p2;
- struct adpt_i2o_post_wait_data *wait_data =
- kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
- DECLARE_WAITQUEUE(wait, current);
-
- if (!wait_data)
- return -ENOMEM;
-
- /*
- * The spin locking is needed to keep anyone from playing
- * with the queue pointers and id while we do the same
- */
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- // TODO we need a MORE unique way of getting ids
- // to support async LCT get
- wait_data->next = adpt_post_wait_queue;
- adpt_post_wait_queue = wait_data;
- adpt_post_wait_id++;
- adpt_post_wait_id &= 0x7fff;
- wait_data->id = adpt_post_wait_id;
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- wait_data->wq = &adpt_wq_i2o_post;
- wait_data->status = -ETIMEDOUT;
-
- add_wait_queue(&adpt_wq_i2o_post, &wait);
-
- msg[2] |= 0x80000000 | ((u32)wait_data->id);
- timeout *= HZ;
- if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
- set_current_state(TASK_INTERRUPTIBLE);
- if(pHba->host)
- spin_unlock_irq(pHba->host->host_lock);
- if (!timeout)
- schedule();
- else{
- timeout = schedule_timeout(timeout);
- if (timeout == 0) {
- // I/O issued, but cannot get result in
- // specified time. Freeing resources is
- // dangerous.
- status = -ETIME;
- }
- }
- if(pHba->host)
- spin_lock_irq(pHba->host->host_lock);
- }
- remove_wait_queue(&adpt_wq_i2o_post, &wait);
-
- if(status == -ETIMEDOUT){
- printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
- // We will have to free the wait_data memory during shutdown
- return status;
- }
-
- /* Remove the entry from the queue. */
- p2 = NULL;
- spin_lock_irqsave(&adpt_post_wait_lock, flags);
- for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
- if(p1 == wait_data) {
- if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
- status = -EOPNOTSUPP;
- }
- if(p2) {
- p2->next = p1->next;
- } else {
- adpt_post_wait_queue = p1->next;
- }
- break;
- }
- }
- spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
-
- kfree(wait_data);
-
- return status;
-}
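-
-/*
- * Sketch of the post-wait context encoding used above, inferred from this
- * file rather than from the I2O spec: bit 31 marks a post-wait message and
- * the low 15 bits carry the queue id (adpt_post_wait_id wraps at 0x7fff),
- * so the completion side can recover the id with a simple mask:
- *
- *	msg[2] |= 0x80000000 | (u32)wait_data->id;	// submit side
- *	context &= 0x7fff;	// adpt_i2o_post_wait_complete()
- */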
-
-
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
-{
-
- u32 m = EMPTY_QUEUE;
- u32 __iomem *msg;
- ulong timeout = jiffies + 30*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg = pHba->msg_addr_virt + m;
- memcpy_toio(msg, data, len);
- wmb();
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- return 0;
-}
-
-
-static void adpt_i2o_post_wait_complete(u32 context, int status)
-{
- struct adpt_i2o_post_wait_data *p1 = NULL;
- /*
- * We need to search through the adpt_post_wait
- * queue to see if the given message is still
- * outstanding. If not, it means that the IOP
- * took longer to respond to the message than we
- * had allowed and timer has already expired.
- * Not much we can do about that except log
- * it for debug purposes, increase timeout, and recompile
- *
- * Lock needed to keep anyone from moving queue pointers
- * around while we're looking through them.
- */
-
- context &= 0x7fff;
-
- spin_lock(&adpt_post_wait_lock);
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- if(p1->id == context) {
- p1->status = status;
- spin_unlock(&adpt_post_wait_lock);
- wake_up_interruptible(p1->wq);
- return;
- }
- }
- spin_unlock(&adpt_post_wait_lock);
- // If this happens we lose commands that probably really completed
- printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
- printk(KERN_DEBUG" Tasks in wait queue:\n");
- for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
- printk(KERN_DEBUG" %d\n",p1->id);
- }
- return;
-}
-
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
-{
- u32 msg[8];
- u8* status;
- dma_addr_t addr;
- u32 m = EMPTY_QUEUE ;
- ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
-
- if(pHba->initialized == FALSE) { // First time reset should be quick
- timeout = jiffies + (25*HZ);
- } else {
- adpt_i2o_quiesce_hba(pHba);
- }
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"Timeout waiting for message!\n");
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if(status == NULL) {
- adpt_send_nop(pHba, m);
- printk(KERN_ERR"IOP reset failed - no free memory.\n");
- return -ENOMEM;
- }
-
- msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]=0;
- msg[3]=0;
- msg[4]=0;
- msg[5]=0;
- msg[6]=dma_low(addr);
- msg[7]=dma_high(addr);
-
- memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
- wmb();
- writel(m, pHba->post_port);
- wmb();
-
- while(*status == 0){
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we cannot
- free these because controller may awake and corrupt
- those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
- PDEBUG("%s: Reset in progress...\n", pHba->name);
- // Here we wait for a message frame to become available,
- // indicating that the reset has finished
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we
- cannot free these because controller may
- awake and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (m == EMPTY_QUEUE);
- // Flush the offset
- adpt_send_nop(pHba, m);
- }
- adpt_i2o_status_get(pHba);
- if(*status == 0x02 ||
- pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
- pHba->name);
- } else {
- PDEBUG("%s: Reset completed.\n", pHba->name);
- }
-
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-#ifdef UARTDELAY
- // This delay is to allow someone attached to the card through the debug UART to
- // set up the dump levels that they want before the rest of the initialization sequence
- adpt_delay(20000);
-#endif
- return 0;
-}
-
-
-static int adpt_i2o_parse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
- u32 buf[10]; // big enough for the 32-byte (8 u32) scalar query below
- struct adpt_device* pDev;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- /*
- * If we have hidden devices, we need to inform the upper layers about
- * the possible maximum id reference to handle device access when
- * an array is disassembled. This code has no other purpose but to
- * allow us future access to devices that are currently hidden
- * behind arrays, hotspares or have not been configured (JBOD mode).
- */
- if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
- lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
- lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- continue;
- }
- tid = lct->lct_entry[i].tid;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- continue;
- }
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
- continue;
- }
- if (scsi_id >= MAX_ID){
- printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
- continue;
- }
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- }
- d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
- if(d==NULL)
- {
- printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- tid = d->lct_data.tid;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
- }
- bus_no = 0;
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
- tid = d->lct_data.tid;
- // TODO get the bus_no from hrt - but for now they are in order
- //bus_no =
- if(bus_no > pHba->top_scsi_channel){
- pHba->top_scsi_channel = bus_no;
- }
- pHba->channel[bus_no].type = d->lct_data.class_id;
- pHba->channel[bus_no].tid = tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
- {
- pHba->channel[bus_no].scsi_id = buf[1];
- PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
- }
- // TODO remove - this is just until we get from hrt
- bus_no++;
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
- break;
- }
- }
- }
-
- // Setup adpt_device table
- for(d = pHba->devices; d ; d = d->next) {
- if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
-
- tid = d->lct_data.tid;
- scsi_id = -1;
- // I2O_DPT_DEVICE_INFO_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
- bus_no = buf[0]>>16;
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
- continue;
- }
- if (scsi_id >= MAX_ID) {
- continue;
- }
- if( pHba->channel[bus_no].device[scsi_id] == NULL){
- pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- for( pDev = pHba->channel[bus_no].device[scsi_id];
- pDev->next_lun; pDev = pDev->next_lun){
- }
- pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
- if(pDev->next_lun == NULL) {
- return -ENOMEM;
- }
- pDev = pDev->next_lun;
- }
- pDev->tid = tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- }
- if(scsi_id == -1){
- printk(KERN_WARNING"Could not find SCSI ID for %s\n",
- d->lct_data.identity_tag);
- }
- }
- }
- return 0;
-}
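-
-/*
- * The entry-count arithmetic in adpt_i2o_parse_lct() above assumes the LCT
- * layout used throughout this file: a 3-word header followed by 9-word
- * entries, with table_size counted in 32-bit words. A worked example,
- * assuming a table_size of 21 words:
- *
- *	max = (21 - 3) / 9;	// 2 LCT entries follow the header
- */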
-
-
-/*
- * Each I2O controller has a chain of devices on it - these match
- * the useful parts of the LCT of the board.
- */
-
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
-{
- mutex_lock(&adpt_configuration_lock);
- d->controller=pHba;
- d->owner=NULL;
- d->next=pHba->devices;
- d->prev=NULL;
- if (pHba->devices != NULL){
- pHba->devices->prev=d;
- }
- pHba->devices=d;
- *d->dev_name = 0;
-
- mutex_unlock(&adpt_configuration_lock);
- return 0;
-}
-
-static int adpt_open(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- mutex_lock(&adpt_mutex);
- //TODO check for root access
- //
- minor = iminor(inode);
- if (minor >= hba_count) {
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- if (pHba == NULL) {
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
- return -ENXIO;
- }
-
-// if(pHba->in_use){
- // mutex_unlock(&adpt_configuration_lock);
-// return -EBUSY;
-// }
-
- pHba->in_use = 1;
- mutex_unlock(&adpt_configuration_lock);
- mutex_unlock(&adpt_mutex);
-
- return 0;
-}
-
-static int adpt_close(struct inode *inode, struct file *file)
-{
- int minor;
- adpt_hba* pHba;
-
- minor = iminor(inode);
- if (minor >= hba_count) {
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if (pHba == NULL) {
- return -ENXIO;
- }
-
- pHba->in_use = 0;
-
- return 0;
-}
-
-
-static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
-{
- u32 msg[MAX_MESSAGE_SIZE];
- u32* reply = NULL;
- u32 size = 0;
- u32 reply_size = 0;
- u32 __user *user_msg = arg;
- u32 __user * user_reply = NULL;
- void **sg_list = NULL;
- u32 sg_offset = 0;
- u32 sg_count = 0;
- int sg_index = 0;
- u32 i = 0;
- u32 rcode = 0;
- void *p = NULL;
- dma_addr_t addr;
- ulong flags = 0;
-
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- return -EFAULT;
- }
- size = size>>16;
-
- user_reply = &user_msg[size];
- if(size > MAX_MESSAGE_SIZE){
- return -EFAULT;
- }
- size *= 4; // Convert to bytes
-
- /* Copy in the user's I2O command */
- if(copy_from_user(msg, user_msg, size)) {
- return -EFAULT;
- }
- get_user(reply_size, &user_reply[0]);
- reply_size = reply_size>>16;
- if(reply_size > REPLY_FRAME_SIZE){
- reply_size = REPLY_FRAME_SIZE;
- }
- reply_size *= 4;
- reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
- if(reply == NULL) {
- printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
- return -ENOMEM;
- }
- sg_offset = (msg[0]>>4)&0xf;
- msg[2] = 0x40000000; // IOCTL context
- msg[3] = adpt_ioctl_to_context(pHba, reply);
- if (msg[3] == (u32)-1) {
- rcode = -EBUSY;
- goto free;
- }
-
- sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
- if (!sg_list) {
- rcode = -ENOMEM;
- goto free;
- }
- if(sg_offset) {
- // TODO add 64 bit API
- struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
- if (sg_count > pHba->sg_tablesize){
- printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
- rcode = -EINVAL;
- goto free;
- }
-
- for(i = 0; i < sg_count; i++) {
- int sg_size;
-
- if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
- printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
- rcode = -EINVAL;
- goto cleanup;
- }
- sg_size = sg[i].flag_count & 0xffffff;
- /* Allocate memory for the transfer */
- p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
- if(!p) {
- printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
- pHba->name,sg_size,i,sg_count);
- rcode = -ENOMEM;
- goto cleanup;
- }
- sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
- /* Copy in the user's SG buffer if necessary */
- if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
- // sg_simple_element API is 32 bit
- if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
- printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- /* sg_simple_element API is 32 bit, but addr < 4GB */
- sg[i].addr_bus = addr;
- }
- }
-
- do {
- /*
- * Stop any new commands from entering the
- * controller while processing the ioctl
- */
- if (pHba->host) {
- scsi_block_requests(pHba->host);
- spin_lock_irqsave(pHba->host->host_lock, flags);
- }
- rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
- if (rcode != 0)
- printk("adpt_i2o_passthru: post wait failed %d %p\n",
- rcode, reply);
- if (pHba->host) {
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- scsi_unblock_requests(pHba->host);
- }
- } while (rcode == -ETIMEDOUT);
-
- if(rcode){
- goto cleanup;
- }
-
- if(sg_offset) {
- /* Copy the scatter/gather buffers back to user space */
- u32 j;
- // TODO add 64 bit API
- struct sg_simple_element* sg;
- int sg_size;
-
- // re-acquire the original message to correctly handle the sg copy operation
- memset(&msg, 0, MAX_MESSAGE_SIZE*4);
- // get user msg size in u32s
- if(get_user(size, &user_msg[0])){
- rcode = -EFAULT;
- goto cleanup;
- }
- size = size>>16;
- size *= 4;
- if (size > MAX_MESSAGE_SIZE) {
- rcode = -EINVAL;
- goto cleanup;
- }
- /* Copy in the user's I2O command */
- if (copy_from_user (msg, user_msg, size)) {
- rcode = -EFAULT;
- goto cleanup;
- }
- sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
-
- // TODO add 64 bit API
- sg = (struct sg_simple_element*)(msg + sg_offset);
- for (j = 0; j < sg_count; j++) {
- /* Copy out the SG list to user's buffer if necessary */
- if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
- sg_size = sg[j].flag_count & 0xffffff;
- // sg_simple_element API is 32 bit
- if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
- printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
- rcode = -EFAULT;
- goto cleanup;
- }
- }
- }
- }
-
- /* Copy back the reply to user space */
- if (reply_size) {
- // we wrote our own values for context - now restore the user supplied ones
- if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
- printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
- rcode = -EFAULT;
- }
- if(copy_to_user(user_reply, reply, reply_size)) {
- printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
- rcode = -EFAULT;
- }
- }
-
-
-cleanup:
- if (rcode != -ETIME && rcode != -EINTR) {
- struct sg_simple_element *sg =
- (struct sg_simple_element*) (msg +sg_offset);
- while(sg_index) {
- if(sg_list[--sg_index]) {
- dma_free_coherent(&pHba->pDev->dev,
- sg[sg_index].flag_count & 0xffffff,
- sg_list[sg_index],
- sg[sg_index].addr_bus);
- }
- }
- }
-
-free:
- kfree(sg_list);
- kfree(reply);
- return rcode;
-}
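-
-/*
- * Sketch of the 32-bit sg_simple_element layout as the passthrough code
- * above interprets it; the masks are taken from the code, the field names
- * from the driver's own struct:
- *
- *	struct sg_simple_element {
- *		u32 flag_count;	// 0x10000000 = simple address element,
- *				// 0x04000000 = data flows host-to-controller,
- *				// 0x00ffffff = byte count of the buffer
- *		u32 addr_bus;	// 32-bit bus address of the buffer
- *	};
- */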
-
-#if defined __ia64__
-static void adpt_ia64_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_IA64;
-}
-#endif
-
-#if defined __sparc__
-static void adpt_sparc_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_ULTRASPARC;
-}
-#endif
-#if defined __alpha__
-static void adpt_alpha_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- si->processorType = PROC_ALPHA;
-}
-#endif
-
-#if defined __i386__
-
-#include <uapi/asm/vm86.h>
-
-static void adpt_i386_info(sysInfo_S* si)
-{
- // This is all the info we need for now
- // We will add more info as our new
- // management utility requires it
- switch (boot_cpu_data.x86) {
- case CPU_386:
- si->processorType = PROC_386;
- break;
- case CPU_486:
- si->processorType = PROC_486;
- break;
- case CPU_586:
- si->processorType = PROC_PENTIUM;
- break;
- default: // Just in case
- si->processorType = PROC_PENTIUM;
- break;
- }
-}
-#endif
-
-/*
- * This routine returns information about the system. This does not affect
- * any logic and if the info is wrong - it doesn't matter.
- */
-
-/* Get all the info we can not get from kernel services */
-static int adpt_system_info(void __user *buffer)
-{
- sysInfo_S si;
-
- memset(&si, 0, sizeof(si));
-
- si.osType = OS_LINUX;
- si.osMajorVersion = 0;
- si.osMinorVersion = 0;
- si.osRevision = 0;
- si.busType = SI_PCI_BUS;
- si.processorFamily = DPTI_sig.dsProcessorFamily;
-
-#if defined __i386__
- adpt_i386_info(&si);
-#elif defined (__ia64__)
- adpt_ia64_info(&si);
-#elif defined(__sparc__)
- adpt_sparc_info(&si);
-#elif defined (__alpha__)
- adpt_alpha_info(&si);
-#else
- si.processorType = 0xff ;
-#endif
- if (copy_to_user(buffer, &si, sizeof(si))){
- printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
-{
- int minor;
- int error = 0;
- adpt_hba* pHba;
- ulong flags = 0;
- void __user *argp = (void __user *)arg;
-
- minor = iminor(inode);
- if (minor >= DPTI_MAX_HBA){
- return -ENXIO;
- }
- mutex_lock(&adpt_configuration_lock);
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- if (pHba->unit == minor) {
- break; /* found adapter */
- }
- }
- mutex_unlock(&adpt_configuration_lock);
- if(pHba == NULL){
- return -ENXIO;
- }
-
- while((volatile u32) pHba->state & DPTI_STATE_RESET )
- schedule_timeout_uninterruptible(2);
-
- switch (cmd) {
- // TODO: handle 3 cases
- case DPT_SIGNATURE:
- if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
- return -EFAULT;
- }
- break;
- case I2OUSRCMD:
- return adpt_i2o_passthru(pHba, argp);
-
- case DPT_CTRLINFO:{
- drvrHBAinfo_S HbaInfo;
-
-#define FLG_OSD_PCI_VALID 0x0001
-#define FLG_OSD_DMA 0x0002
-#define FLG_OSD_I2O 0x0004
- memset(&HbaInfo, 0, sizeof(HbaInfo));
- HbaInfo.drvrHBAnum = pHba->unit;
- HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
- HbaInfo.blinkState = adpt_read_blink_led(pHba);
- HbaInfo.pciBusNum = pHba->pDev->bus->number;
- HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
- HbaInfo.Interrupt = pHba->pDev->irq;
- HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
- if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
- printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
- return -EFAULT;
- }
- break;
- }
- case DPT_SYSINFO:
- return adpt_system_info(argp);
- case DPT_BLINKLED:{
- u32 value;
- value = (u32)adpt_read_blink_led(pHba);
- if (copy_to_user(argp, &value, sizeof(value))) {
- return -EFAULT;
- }
- break;
- }
- case I2ORESETCMD: {
- struct Scsi_Host *shost = pHba->host;
-
- if (shost)
- spin_lock_irqsave(shost->host_lock, flags);
- adpt_hba_reset(pHba);
- if (shost)
- spin_unlock_irqrestore(shost->host_lock, flags);
- break;
- }
- case I2ORESCANCMD:
- adpt_rescan(pHba);
- break;
- default:
- return -EINVAL;
- }
-
- return error;
-}
-
-static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
- ret = adpt_ioctl(inode, file, cmd, arg);
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long compat_adpt_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct inode *inode;
- long ret;
-
- inode = file_inode(file);
-
- mutex_lock(&adpt_mutex);
-
- switch(cmd) {
- case DPT_SIGNATURE:
- case I2OUSRCMD:
- case DPT_CTRLINFO:
- case DPT_SYSINFO:
- case DPT_BLINKLED:
- case I2ORESETCMD:
- case I2ORESCANCMD:
- case (DPT_TARGET_BUSY & 0xFFFF):
- case DPT_TARGET_BUSY:
- ret = adpt_ioctl(inode, file, cmd, arg);
- break;
- default:
- ret = -ENOIOCTLCMD;
- }
-
- mutex_unlock(&adpt_mutex);
-
- return ret;
-}
-#endif
-
-static irqreturn_t adpt_isr(int irq, void *dev_id)
-{
- struct scsi_cmnd* cmd;
- adpt_hba* pHba = dev_id;
- u32 m;
- void __iomem *reply;
- u32 status=0;
- u32 context;
- ulong flags = 0;
- int handled = 0;
-
- if (pHba == NULL){
- printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
- return IRQ_NONE;
- }
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
-
- while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // Try twice then give up
- rmb();
- m = readl(pHba->reply_port);
- if(m == EMPTY_QUEUE){
- // This really should not happen
- printk(KERN_ERR"dpti: Could not get reply frame\n");
- goto out;
- }
- }
- if (pHba->reply_pool_pa <= m &&
- m < pHba->reply_pool_pa +
- (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
- reply = (u8 *)pHba->reply_pool +
- (m - pHba->reply_pool_pa);
- } else {
- /* Ick, we should *never* be here */
- printk(KERN_ERR "dpti: reply frame not from pool\n");
- reply = (u8 *)bus_to_virt(m);
- }
-
- if (readl(reply) & MSG_FAIL) {
- u32 old_m = readl(reply+28);
- void __iomem *msg;
- u32 old_context;
- PDEBUG("%s: Failed message\n",pHba->name);
- if(old_m >= 0x100000){
- printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
- writel(m,pHba->reply_port);
- continue;
- }
- // Transaction context is 0 in failed reply frame
- msg = pHba->msg_addr_virt + old_m;
- old_context = readl(msg+12);
- writel(old_context, reply+12);
- adpt_send_nop(pHba, old_m);
- }
- context = readl(reply+8);
- if(context & 0x40000000){ // IOCTL
- void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
- if( p != NULL) {
- memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
- }
- // All IOCTLs will also be post wait
- }
- if(context & 0x80000000){ // Post wait message
- status = readl(reply+16);
- if(status >> 24){
- status &= 0xffff; /* Get detail status */
- } else {
- status = I2O_POST_WAIT_OK;
- }
- if(!(context & 0x40000000)) {
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL) {
- printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
- }
- }
- adpt_i2o_post_wait_complete(context, status);
- } else { // SCSI message
- /*
- * The request tag is one less than the command tag
- * as the firmware might treat a 0 tag as invalid
- */
- cmd = scsi_host_find_tag(pHba->host,
- readl(reply + 12) - 1);
- if(cmd != NULL){
- scsi_dma_unmap(cmd);
- adpt_i2o_scsi_complete(reply, cmd);
- }
- }
- writel(m, pHba->reply_port);
- wmb();
- rmb();
- }
- handled = 1;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return IRQ_RETVAL(handled);
-}
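-
-/*
- * Sketch of the reply-frame translation done in the ISR above: the value
- * read from reply_port is the bus address of a frame inside the coherent
- * reply pool, so the virtual address is recovered by offsetting into the
- * pool rather than by the legacy bus_to_virt() fallback:
- *
- *	if (pHba->reply_pool_pa <= m &&
- *	    m < pHba->reply_pool_pa + pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)
- *		reply = (u8 *)pHba->reply_pool + (m - pHba->reply_pool_pa);
- */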
-
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
-{
- int i;
- u32 msg[MAX_MESSAGE_SIZE];
- u32* mptr;
- u32* lptr;
- u32 *lenptr;
- int direction;
- int scsidir;
- int nseg;
- u32 len;
- u32 reqlen;
- s32 rcode;
- dma_addr_t addr;
-
- memset(msg, 0 , sizeof(msg));
- len = scsi_bufflen(cmd);
- direction = 0x00000000;
-
- scsidir = 0x00000000; // DATA NO XFER
- if(len) {
- /*
- * Set SCBFlags to indicate if data is being transferred
- * in or out, or no data transfer
- * Note: Do not have to verify index is less than 0 since
- * cmd->cmnd[0] is an unsigned char
- */
- switch(cmd->sc_data_direction){
- case DMA_FROM_DEVICE:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- break;
- case DMA_TO_DEVICE:
- direction=0x04000000; // SGL OUT
- scsidir =0x80000000; // DATA OUT (iop-->dev)
- break;
- case DMA_NONE:
- break;
- case DMA_BIDIRECTIONAL:
- scsidir =0x40000000; // DATA IN (iop<--dev)
- // Assume In - and continue;
- break;
- default:
- printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
- pHba->name, cmd->cmnd[0]);
- cmd->result = (DID_ERROR <<16);
- scsi_done(cmd);
- return 0;
- }
- }
- // msg[0] is set later
- // I2O_CMD_SCSI_EXEC
- msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
- msg[2] = 0;
- /* Add 1 to avoid the firmware treating it as an invalid command */
- msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
- // Our cards use the transaction context as the tag for queueing
- // Adaptec/DPT Private stuff
- msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
- msg[5] = d->tid;
- /* Direction, disconnect ok | sense data | simple queue , CDBLen */
- // I2O_SCB_FLAG_ENABLE_DISCONNECT |
- // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
- // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
- msg[6] = scsidir|0x20a00000|cmd->cmd_len;
-
- mptr=msg+7;
-
- // Write SCSI command into the message - always 16 byte block
- memset(mptr, 0, 16);
- memcpy(mptr, cmd->cmnd, cmd->cmd_len);
- mptr+=4;
- lenptr=mptr++; /* Remember me - fill in when we know */
- if (dpt_dma64(pHba)) {
- reqlen = 16; // SINGLE SGE
- *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
- *mptr++ = 1 << PAGE_SHIFT;
- } else {
- reqlen = 14; // SINGLE SGE
- }
- /* Now fill in the SGList and command */
-
- nseg = scsi_dma_map(cmd);
- BUG_ON(nseg < 0);
- if (nseg) {
- struct scatterlist *sg;
-
- len = 0;
- scsi_for_each_sg(cmd, sg, nseg, i) {
- lptr = mptr;
- *mptr++ = direction|0x10000000|sg_dma_len(sg);
- len+=sg_dma_len(sg);
- addr = sg_dma_address(sg);
- *mptr++ = dma_low(addr);
- if (dpt_dma64(pHba))
- *mptr++ = dma_high(addr);
- /* Make this an end of list */
- if (i == nseg - 1)
- *lptr = direction|0xD0000000|sg_dma_len(sg);
- }
- reqlen = mptr - msg;
- *lenptr = len;
-
- if(cmd->underflow && len != cmd->underflow){
- printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
- len, cmd->underflow);
- }
- } else {
- *lenptr = len = 0;
- reqlen = 12;
- }
-
- /* Stick the headers on */
- msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
-
- // Send it on its way
- rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
- if (rcode == 0) {
- return 0;
- }
- return rcode;
-}
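-
-/*
- * Sketch of the message header built at the end of adpt_scsi_to_i2o():
- * the request length in 32-bit words sits in the upper 16 bits and the
- * SGL offset code in the lower bits, so a request carrying an SG list
- * declares its SGL to start at word 12:
- *
- *	msg[0] = reqlen << 16 | (reqlen > 12 ? SGL_OFFSET_12 : SGL_OFFSET_0);
- */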
-
-
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
-{
- struct Scsi_Host *host;
-
- host = scsi_host_alloc(sht, sizeof(adpt_hba*));
- if (host == NULL) {
- printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
- return -1;
- }
- host->hostdata[0] = (unsigned long)pHba;
- pHba->host = host;
-
- host->irq = pHba->pDev->irq;
- /* no IO ports, so don't have to set host->io_port and
- * host->n_io_port
- */
- host->io_port = 0;
- host->n_io_port = 0;
- /* see comments in scsi_host.h */
- host->max_id = 16;
- host->max_lun = 256;
- host->max_channel = pHba->top_scsi_channel + 1;
- host->cmd_per_lun = 1;
- host->unique_id = (u32)sys_tbl_pa + pHba->unit;
- host->sg_tablesize = pHba->sg_tablesize;
- host->can_queue = pHba->post_fifo_size;
-
- return 0;
-}
-
-
-static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
-{
- adpt_hba* pHba;
- u32 hba_status;
- u32 dev_status;
- u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
- // I know this would look cleaner if I just read bytes
- // but the model I have been using for all the rest of the
- // io is in 4 byte words - so I keep that model
- u16 detailed_status = readl(reply+16) &0xffff;
- dev_status = (detailed_status & 0xff);
- hba_status = detailed_status >> 8;
-
- // calculate resid for sg
- scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
-
- pHba = (adpt_hba*) cmd->device->host->hostdata[0];
-
- cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
-
- if(!(reply_flags & MSG_FAIL)) {
- switch(detailed_status & I2O_SCSI_DSC_MASK) {
- case I2O_SCSI_DSC_SUCCESS:
- cmd->result = (DID_OK << 16);
- // handle underflow
- if (readl(reply+20) < cmd->underflow) {
- cmd->result = (DID_ERROR <<16);
- printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
- }
- break;
- case I2O_SCSI_DSC_REQUEST_ABORTED:
- cmd->result = (DID_ABORT << 16);
- break;
- case I2O_SCSI_DSC_PATH_INVALID:
- case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
- case I2O_SCSI_DSC_SELECTION_TIMEOUT:
- case I2O_SCSI_DSC_COMMAND_TIMEOUT:
- case I2O_SCSI_DSC_NO_ADAPTER:
- case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
- printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_TIME_OUT << 16);
- break;
- case I2O_SCSI_DSC_ADAPTER_BUSY:
- case I2O_SCSI_DSC_BUS_BUSY:
- cmd->result = (DID_BUS_BUSY << 16);
- break;
- case I2O_SCSI_DSC_SCSI_BUS_RESET:
- case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
- cmd->result = (DID_RESET << 16);
- break;
- case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
- printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
- cmd->result = (DID_PARITY << 16);
- break;
- case I2O_SCSI_DSC_UNABLE_TO_ABORT:
- case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
- case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
- case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_AUTOSENSE_FAILED:
- case I2O_SCSI_DSC_DATA_OVERRUN:
- case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
- case I2O_SCSI_DSC_SEQUENCE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
- case I2O_SCSI_DSC_PROVIDE_FAILURE:
- case I2O_SCSI_DSC_REQUEST_TERMINATED:
- case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
- case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
- case I2O_SCSI_DSC_MESSAGE_RECEIVED:
- case I2O_SCSI_DSC_INVALID_CDB:
- case I2O_SCSI_DSC_LUN_INVALID:
- case I2O_SCSI_DSC_SCSI_TID_INVALID:
- case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
- case I2O_SCSI_DSC_NO_NEXUS:
- case I2O_SCSI_DSC_CDB_RECEIVED:
- case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
- case I2O_SCSI_DSC_QUEUE_FROZEN:
- case I2O_SCSI_DSC_REQUEST_INVALID:
- default:
- printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
- cmd->result = (DID_ERROR << 16);
- break;
- }
-
- // copy over the request sense data if it was a check
- // condition status
- if (dev_status == SAM_STAT_CHECK_CONDITION) {
- u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
- // Copy over the sense data
- memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
- if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
- cmd->sense_buffer[2] == DATA_PROTECT ){
- /* This is to handle an array failed */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- hba_status, dev_status, cmd->cmnd[0]);
-
- }
- }
- } else {
- /* In this condition we could not talk to the tid;
- * the card rejected it. We should signal a retry
- * for a limited number of retries.
- */
- cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
- ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
- }
-
- cmd->result |= (dev_status);
-
- scsi_done(cmd);
-}
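-
-/*
- * Sketch of the status decoding done in adpt_i2o_scsi_complete() above:
- * word 4 of the reply frame (byte offset 16) carries the detailed status
- * in its low 16 bits, split into an adapter byte and a SAM device byte:
- *
- *	u16 detailed_status = readl(reply + 16) & 0xffff;
- *	u32 dev_status = detailed_status & 0xff;	// e.g. SAM_STAT_CHECK_CONDITION
- *	u32 hba_status = detailed_status >> 8;		// adapter-level status
- */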
-
-
-static s32 adpt_rescan(adpt_hba* pHba)
-{
- s32 rcode;
- ulong flags = 0;
-
- if(pHba->host)
- spin_lock_irqsave(pHba->host->host_lock, flags);
- if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
- goto out;
- if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
- goto out;
- rcode = 0;
-out: if(pHba->host)
- spin_unlock_irqrestore(pHba->host->host_lock, flags);
- return rcode;
-}
-
-
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
-{
- int i;
- int max;
- int tid;
- struct i2o_device *d;
- i2o_lct *lct = pHba->lct;
- u8 bus_no = 0;
- s16 scsi_id;
- u64 scsi_lun;
- u32 buf[10]; // at least 8 u32's
- struct adpt_device* pDev = NULL;
- struct i2o_device* pI2o_dev = NULL;
-
- if (lct == NULL) {
- printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
- return -1;
- }
-
- max = lct->table_size;
- max -= 3;
- max /= 9;
-
- // Mark each drive as unscanned
- for (d = pHba->devices; d; d = d->next) {
- pDev =(struct adpt_device*) d->owner;
- if(!pDev){
- continue;
- }
- pDev->state |= DPTI_DEV_UNSCANNED;
- }
-
- printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
-
- for(i=0;i<max;i++) {
- if( lct->lct_entry[i].user_tid != 0xfff){
- continue;
- }
-
- if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
- lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
- lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
- tid = lct->lct_entry[i].tid;
- if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
- printk(KERN_ERR"%s: Could not query device\n",pHba->name);
- continue;
- }
- bus_no = buf[0]>>16;
- if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
- printk(KERN_WARNING
- "%s: Channel number %d out of range\n",
- pHba->name, bus_no);
- continue;
- }
-
- scsi_id = buf[1];
- scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
- pDev = pHba->channel[bus_no].device[scsi_id];
- /* walk the LUN chain for a match */
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- break;
- }
- pDev = pDev->next_lun;
- }
- if(!pDev ) { // Something new add it
- d = kmalloc(sizeof(struct i2o_device),
- GFP_ATOMIC);
- if(d==NULL)
- {
- printk(KERN_CRIT "Out of memory for I2O device data.\n");
- return -ENOMEM;
- }
-
- d->controller = pHba;
- d->next = NULL;
-
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
-
- d->flags = 0;
- adpt_i2o_report_hba_unit(pHba, d);
- adpt_i2o_install_device(pHba, d);
-
- pDev = pHba->channel[bus_no].device[scsi_id];
- if( pDev == NULL){
- pDev =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- pHba->channel[bus_no].device[scsi_id] = pDev;
- } else {
- while (pDev->next_lun) {
- pDev = pDev->next_lun;
- }
- pDev = pDev->next_lun =
- kzalloc(sizeof(struct adpt_device),
- GFP_ATOMIC);
- if(pDev == NULL) {
- return -ENOMEM;
- }
- }
- pDev->tid = d->lct_data.tid;
- pDev->scsi_channel = bus_no;
- pDev->scsi_id = scsi_id;
- pDev->scsi_lun = scsi_lun;
- pDev->pI2o_dev = d;
- d->owner = pDev;
- pDev->type = (buf[0])&0xff;
- pDev->flags = (buf[0]>>8)&0xff;
- // Too late, SCSI system has made up its mind, but what the hey ...
- if(scsi_id > pHba->top_scsi_id){
- pHba->top_scsi_id = scsi_id;
- }
- if(scsi_lun > pHba->top_scsi_lun){
- pHba->top_scsi_lun = scsi_lun;
- }
- continue;
- } // end of new i2o device
-
- // We found an old device - check it
- while(pDev) {
- if(pDev->scsi_lun == scsi_lun) {
- if(!scsi_device_online(pDev->pScsi_dev)) {
- printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
- pHba->name,bus_no,scsi_id,scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
- }
- }
- d = pDev->pI2o_dev;
- if(d->lct_data.tid != tid) { // something changed
- pDev->tid = tid;
- memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
- if (pDev->pScsi_dev) {
- pDev->pScsi_dev->changed = TRUE;
- pDev->pScsi_dev->removable = TRUE;
- }
- }
- // Found it - mark it scanned
- pDev->state = DPTI_DEV_ONLINE;
- break;
- }
- pDev = pDev->next_lun;
- }
- }
- }
- for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
- pDev =(struct adpt_device*) pI2o_dev->owner;
- if(!pDev){
- continue;
- }
- // Offline any drives that previously existed but could not be found
- // in the LCT table
- if (pDev->state & DPTI_DEV_UNSCANNED){
- pDev->state = DPTI_DEV_OFFLINE;
- printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
- if (pDev->pScsi_dev) {
- scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
- }
- }
- }
- return 0;
-}
-
-/*============================================================================
- * Routines from i2o subsystem
- *============================================================================
- */
-
-
-
-/*
- * Bring an I2O controller into HOLD state. See the spec.
- */
-static int adpt_i2o_activate_hba(adpt_hba* pHba)
-{
- int rcode;
-
- if(pHba->initialized ) {
- if (adpt_i2o_status_get(pHba) < 0) {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
- if (adpt_i2o_status_get(pHba) < 0) {
- printk(KERN_INFO "HBA not responding.\n");
- return -1;
- }
- }
-
- if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
- printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
- return -1;
- }
-
- if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
- pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
- pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
- pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
- adpt_i2o_reset_hba(pHba);
- if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
- printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
- return -1;
- }
- }
- } else {
- if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
- printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
- return rcode;
- }
-
- }
-
- if (adpt_i2o_init_outbound_q(pHba) < 0) {
- return -1;
- }
-
- /* In HOLD state */
-
- if (adpt_i2o_hrt_get(pHba) < 0) {
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Bring a controller online into OPERATIONAL state.
- */
-
-static int adpt_i2o_online_hba(adpt_hba* pHba)
-{
- if (adpt_i2o_systab_send(pHba) < 0)
- return -1;
- /* In READY state */
-
- if (adpt_i2o_enable_hba(pHba) < 0)
- return -1;
-
- /* In OPERATIONAL state */
- return 0;
-}
-
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
-{
- u32 __iomem *msg;
- ulong timeout = jiffies + 5*HZ;
-
- while(m == EMPTY_QUEUE){
- rmb();
- m = readl(pHba->post_port);
- if(m != EMPTY_QUEUE){
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
- return 2;
- }
- schedule_timeout_uninterruptible(1);
- }
- msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
- writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
- writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
- writel( 0,&msg[2]);
- wmb();
-
- writel(m, pHba->post_port);
- wmb();
- return 0;
-}
-
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
-{
- u8 *status;
- dma_addr_t addr;
- u32 __iomem *msg = NULL;
- int i;
- ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
- u32 m;
-
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
-
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m == EMPTY_QUEUE);
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
- if (!status) {
- adpt_send_nop(pHba, m);
- printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
- pHba->name);
- return -ENOMEM;
- }
-
- writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
- writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
- writel(0, &msg[2]);
- writel(0x0106, &msg[3]); /* Transaction context */
- writel(4096, &msg[4]); /* Host page frame size */
- writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
- writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
- writel((u32)addr, &msg[7]);
-
- writel(m, pHba->post_port);
- wmb();
-
- // Wait for the reply status to come back
- do {
- if (*status) {
- if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
- break;
- }
- }
- rmb();
- if(time_after(jiffies,timeout)){
- printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
- /* We lose 4 bytes of "status" here, but we
- cannot free these because controller may
- awake and corrupt those bytes at any time */
- /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while (1);
-
- // If the command was successful, fill the fifo with our reply
- // message packets
- if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
- return -2;
- }
- dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
-
- if(pHba->reply_pool != NULL) {
- dma_free_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- pHba->reply_pool, pHba->reply_pool_pa);
- }
-
- pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
- &pHba->reply_pool_pa, GFP_KERNEL);
- if (!pHba->reply_pool) {
- printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
- return -ENOMEM;
- }
-
- for(i = 0; i < pHba->reply_fifo_size; i++) {
- writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
- pHba->reply_port);
- wmb();
- }
- adpt_i2o_status_get(pHba);
- return 0;
-}
-
-
-/*
- * I2O System Table. Contains information about
- * all the IOPs in the system. Used to inform IOPs
- * about each other's existence.
- *
- * sys_tbl_ver is the CurrentChangeIndicator that is
- * used by IOPs to track changes.
- */
-
-
-
-static s32 adpt_i2o_status_get(adpt_hba* pHba)
-{
- ulong timeout;
- u32 m;
- u32 __iomem *msg;
- u8 *status_block=NULL;
-
- if(pHba->status_block == NULL) {
- pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(i2o_status_block),
- &pHba->status_block_pa, GFP_KERNEL);
- if(pHba->status_block == NULL) {
- printk(KERN_ERR
- "dpti%d: Get Status Block failed; Out of memory. \n",
- pHba->unit);
- return -ENOMEM;
- }
- }
- memset(pHba->status_block, 0, sizeof(i2o_status_block));
- status_block = (u8*)(pHba->status_block);
- timeout = jiffies+TMOUT_GETSTATUS*HZ;
- do {
- rmb();
- m = readl(pHba->post_port);
- if (m != EMPTY_QUEUE) {
- break;
- }
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR "%s: Timeout waiting for message !\n",
- pHba->name);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- } while(m==EMPTY_QUEUE);
-
-
- msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
-
- writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
- writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
- writel(1, &msg[2]);
- writel(0, &msg[3]);
- writel(0, &msg[4]);
- writel(0, &msg[5]);
- writel( dma_low(pHba->status_block_pa), &msg[6]);
- writel( dma_high(pHba->status_block_pa), &msg[7]);
- writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
-
- //post message
- writel(m, pHba->post_port);
- wmb();
-
- while(status_block[87]!=0xff){
- if(time_after(jiffies,timeout)){
- printk(KERN_ERR"dpti%d: Get status timeout.\n",
- pHba->unit);
- return -ETIMEDOUT;
- }
- rmb();
- schedule_timeout_uninterruptible(1);
- }
-
- // Set up our number of outbound and inbound messages
- pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
- if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
- pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
- }
-
- pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
- if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
- pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
- }
-
- // Calculate the Scatter Gather list size
- if (dpt_dma64(pHba)) {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 14 * sizeof(u32))
- / (sizeof(struct sg_simple_element) + sizeof(u32)));
- } else {
- pHba->sg_tablesize
- = ((pHba->status_block->inbound_frame_size * 4
- - 12 * sizeof(u32))
- / sizeof(struct sg_simple_element));
- }
- if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
- pHba->sg_tablesize = SG_LIST_ELEMENTS;
- }
-
-
-#ifdef DEBUG
- printk("dpti%d: State = ",pHba->unit);
- switch(pHba->status_block->iop_state) {
- case 0x01:
- printk("INIT\n");
- break;
- case 0x02:
- printk("RESET\n");
- break;
- case 0x04:
- printk("HOLD\n");
- break;
- case 0x05:
- printk("READY\n");
- break;
- case 0x08:
- printk("OPERATIONAL\n");
- break;
- case 0x10:
- printk("FAILED\n");
- break;
- case 0x11:
- printk("FAULTED\n");
- break;
- default:
- printk("%x (unknown!!)\n",pHba->status_block->iop_state);
- }
-#endif
- return 0;
-}
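
As a worked example of the scatter/gather sizing above (hypothetical
numbers): with inbound_frame_size = 128 32-bit words the frame holds
512 bytes, and sizeof(struct sg_simple_element) is 8 bytes (two u32s;
see dpti.h below), so:

	64-bit SG: (512 - 14*4) / (8 + 4) = 456 / 12 = 38 elements
	32-bit SG: (512 - 12*4) / 8 = 464 / 8 = 58, clamped to SG_LIST_ELEMENTS (56)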
-
-/*
- * Get the IOP's Logical Configuration Table
- */
-static int adpt_i2o_lct_get(adpt_hba* pHba)
-{
- u32 msg[8];
- int ret;
- u32 buf[16];
-
- if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
- pHba->lct_size = pHba->status_block->expected_lct_size;
- }
- do {
- if (pHba->lct == NULL) {
- pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
- pHba->lct_size, &pHba->lct_pa,
- GFP_ATOMIC);
- if(pHba->lct == NULL) {
- printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- }
- memset(pHba->lct, 0, pHba->lct_size);
-
- msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
- msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0xFFFFFFFF; /* All devices */
- msg[5] = 0x00000000; /* Report now */
- msg[6] = 0xD0000000|pHba->lct_size;
- msg[7] = (u32)pHba->lct_pa;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
- printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
- pHba->name, ret);
- printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
- return ret;
- }
-
- if ((pHba->lct->table_size << 2) > pHba->lct_size) {
- pHba->lct_size = pHba->lct->table_size << 2;
- dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
- pHba->lct, pHba->lct_pa);
- pHba->lct = NULL;
- }
- } while (pHba->lct == NULL);
-
- PDEBUG("%s: Hardware resource table read.\n", pHba->name);
-
-
- // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
- if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
- pHba->FwDebugBufferSize = buf[1];
- pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
- pHba->FwDebugBufferSize);
- if (pHba->FwDebugBuffer_P) {
- pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_FLAGS_OFFSET;
- pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_BLED_OFFSET;
- pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
- pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
- FW_DEBUG_STR_LENGTH_OFFSET;
- pHba->FwDebugBuffer_P += buf[2];
- pHba->FwDebugFlags = 0;
- }
- }
-
- return 0;
-}
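
The loop above is the usual grow-and-retry shape for firmware tables whose
size is only known after a first read: allocate at the expected size, fetch,
and if the reported table_size is larger, free, enlarge, and try again. A
minimal generic sketch (hypothetical helper; plain kzalloc instead of
coherent DMA for brevity):

#include <linux/slab.h>

/* fetch() fills buf and reports the size the table really needs. */
static void *read_fw_table(size_t *len,
			   int (*fetch)(void *buf, size_t len, size_t *needed))
{
	for (;;) {
		size_t needed;
		void *buf = kzalloc(*len, GFP_KERNEL);

		if (!buf)
			return NULL;
		if (fetch(buf, *len, &needed)) {
			kfree(buf);
			return NULL;		/* hard error */
		}
		if (needed <= *len)
			return buf;		/* table fit: done */
		kfree(buf);			/* too small: grow, retry */
		*len = needed;
	}
}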
-
-static int adpt_i2o_build_sys_table(void)
-{
- adpt_hba* pHba = hba_chain;
- int count = 0;
-
- if (sys_tbl)
- dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
- sys_tbl, sys_tbl_pa);
-
- sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
- (hba_count) * sizeof(struct i2o_sys_tbl_entry);
-
- sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
- sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
- if (!sys_tbl) {
- printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
- return -ENOMEM;
- }
-
- sys_tbl->num_entries = hba_count;
- sys_tbl->version = I2OVERSION;
- sys_tbl->change_ind = sys_tbl_ind++;
-
- for(pHba = hba_chain; pHba; pHba = pHba->next) {
- u64 addr;
- // Get updated Status Block so we have the latest information
- if (adpt_i2o_status_get(pHba)) {
- sys_tbl->num_entries--;
- continue; // try next one
- }
-
- sys_tbl->iops[count].org_id = pHba->status_block->org_id;
- sys_tbl->iops[count].iop_id = pHba->unit + 2;
- sys_tbl->iops[count].seg_num = 0;
- sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
- sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
- sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
- sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
- sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
- sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
- addr = pHba->base_addr_phys + 0x40;
- sys_tbl->iops[count].inbound_low = dma_low(addr);
- sys_tbl->iops[count].inbound_high = dma_high(addr);
-
- count++;
- }
-
-#ifdef DEBUG
-{
- u32 *table = (u32*)sys_tbl;
- printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
- for(count = 0; count < (sys_tbl_len >>2); count++) {
- printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
- count, table[count]);
- }
-}
-#endif
-
- return 0;
-}
-
-
-/*
- * Dump the information block associated with a given unit (TID)
- */
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
-{
- char buf[64];
- int unit = d->lct_data.tid;
-
- printk(KERN_INFO "TID %3.3d ", unit);
-
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Vendor: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
- {
- buf[16]=0;
- printk(" Device: %-12.12s", buf);
- }
- if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
- {
- buf[8]=0;
- printk(" Rev: %-12.12s\n", buf);
- }
-#ifdef DEBUG
- printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
- printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
- printk(KERN_INFO "\tFlags: ");
-
- if(d->lct_data.device_flags&(1<<0))
- printk("C"); // ConfigDialog requested
- if(d->lct_data.device_flags&(1<<1))
- printk("U"); // Multi-user capable
- if(!(d->lct_data.device_flags&(1<<4)))
- printk("P"); // Peer service enabled!
- if(!(d->lct_data.device_flags&(1<<5)))
- printk("M"); // Mgmt service enabled!
- printk("\n");
-#endif
-}
-
-#ifdef DEBUG
-/*
- * Do i2o class name lookup
- */
-static const char *adpt_i2o_get_class_name(int class)
-{
- int idx = 16;
- static char *i2o_class_name[] = {
- "Executive",
- "Device Driver Module",
- "Block Device",
- "Tape Device",
- "LAN Interface",
- "WAN Interface",
- "Fibre Channel Port",
- "Fibre Channel Device",
- "SCSI Device",
- "ATE Port",
- "ATE Device",
- "Floppy Controller",
- "Floppy Device",
- "Secondary Bus Port",
- "Peer Transport Agent",
- "Peer Transport",
- "Unknown"
- };
-
- switch(class&0xFFF) {
- case I2O_CLASS_EXECUTIVE:
- idx = 0; break;
- case I2O_CLASS_DDM:
- idx = 1; break;
- case I2O_CLASS_RANDOM_BLOCK_STORAGE:
- idx = 2; break;
- case I2O_CLASS_SEQUENTIAL_STORAGE:
- idx = 3; break;
- case I2O_CLASS_LAN:
- idx = 4; break;
- case I2O_CLASS_WAN:
- idx = 5; break;
- case I2O_CLASS_FIBRE_CHANNEL_PORT:
- idx = 6; break;
- case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
- idx = 7; break;
- case I2O_CLASS_SCSI_PERIPHERAL:
- idx = 8; break;
- case I2O_CLASS_ATE_PORT:
- idx = 9; break;
- case I2O_CLASS_ATE_PERIPHERAL:
- idx = 10; break;
- case I2O_CLASS_FLOPPY_CONTROLLER:
- idx = 11; break;
- case I2O_CLASS_FLOPPY_DEVICE:
- idx = 12; break;
- case I2O_CLASS_BUS_ADAPTER_PORT:
- idx = 13; break;
- case I2O_CLASS_PEER_TRANSPORT_AGENT:
- idx = 14; break;
- case I2O_CLASS_PEER_TRANSPORT:
- idx = 15; break;
- }
- return i2o_class_name[idx];
-}
-#endif
-
-
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
-{
- u32 msg[6];
- int ret, size = sizeof(i2o_hrt);
-
- do {
- if (pHba->hrt == NULL) {
- pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
- size, &pHba->hrt_pa, GFP_KERNEL);
- if (pHba->hrt == NULL) {
- printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
- }
-
- msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
- msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
- msg[4]= (0xD0000000 | size); /* Simple transaction */
- msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
- printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
- return ret;
- }
-
- if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
- int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
- dma_free_coherent(&pHba->pDev->dev, size,
- pHba->hrt, pHba->hrt_pa);
- size = newsize;
- pHba->hrt = NULL;
- }
- } while(pHba->hrt == NULL);
- return 0;
-}
-
-/*
- * Query one scalar group value or a whole scalar group.
- */
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen)
-{
- u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
- u8 *opblk_va;
- dma_addr_t opblk_pa;
- u8 *resblk_va;
- dma_addr_t resblk_pa;
-
- int size;
-
- /* 8 bytes for header */
- resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
- if (resblk_va == NULL) {
- printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
- return -ENOMEM;
- }
-
- opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
- sizeof(opblk), &opblk_pa, GFP_KERNEL);
- if (opblk_va == NULL) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
- pHba->name);
- return -ENOMEM;
- }
- if (field == -1) /* whole group */
- opblk[4] = -1;
-
- memcpy(opblk_va, opblk, sizeof(opblk));
- size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
- opblk_va, opblk_pa, sizeof(opblk),
- resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
- dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
- if (size == -ETIME) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
- return -ETIME;
- } else if (size == -EINTR) {
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
- return -EINTR;
- }
-
- memcpy(buf, resblk_va+8, buflen); /* cut off header */
-
- dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
- resblk_va, resblk_pa);
- if (size < 0)
- return size;
-
- return buflen;
-}
-
-
-/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
- *
- * This function can be used for all UtilParamsGet/Set operations.
- * The OperationBlock is given in opblk-buffer,
- * and results are returned in resblk-buffer.
- * Note that the minimum sized resblk is 8 bytes and contains
- * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
- */
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk_va, dma_addr_t opblk_pa, int oplen,
- void *resblk_va, dma_addr_t resblk_pa, int reslen)
-{
- u32 msg[9];
- u32 *res = (u32 *)resblk_va;
- int wait_status;
-
- msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
- msg[1] = cmd << 24 | HOST_TID << 12 | tid;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = 0;
- msg[5] = 0x54000000 | oplen; /* OperationBlock */
- msg[6] = (u32)opblk_pa;
- msg[7] = 0xD0000000 | reslen; /* ResultBlock */
- msg[8] = (u32)resblk_pa;
-
- if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
- printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
- return wait_status; /* -DetailedStatus */
- }
-
- if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
- printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
- "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
- pHba->name,
- (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
- : "PARAMS_GET",
- res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
- return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
- }
-
- return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
-}
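
The res[1] unpacking above implies, on a little-endian host, roughly this
layout for the mandatory 8-byte result-block header (a sketch inferred from
the masks, not taken from the I2O headers):

struct i2o_resblk_hdr {		/* sketch only */
	u16 result_count;	/* presumed res[0] & 0xFFFF */
	u16 reserved;
	u16 block_size;		/* res[1] & 0xFFFF, in 32-bit words */
	u8  block_status;	/* (res[1] >> 16) & 0xFF; 0 == success */
	u8  error_info_size;	/* res[1] >> 24 */
};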
-
-
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
-
- /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
-
- if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
- (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
- return 0;
- }
-
- msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
-
- if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
- pHba->unit, -ret);
- } else {
- printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-/*
- * Enable IOP. Allows the IOP to resume external operations.
- */
-static int adpt_i2o_enable_hba(adpt_hba* pHba)
-{
- u32 msg[4];
- int ret;
-
- adpt_i2o_status_get(pHba);
- if(!pHba->status_block){
- return -ENOMEM;
- }
- /* Enable only allowed on READY state */
- if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
- return 0;
-
- if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
- return -EINVAL;
-
- msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
- msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
- msg[2]= 0;
- msg[3]= 0;
-
- if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
- printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
- pHba->name, ret);
- } else {
- PDEBUG("%s: Enabled.\n", pHba->name);
- }
-
- adpt_i2o_status_get(pHba);
- return ret;
-}
-
-
-static int adpt_i2o_systab_send(adpt_hba* pHba)
-{
- u32 msg[12];
- int ret;
-
- msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
- msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
- msg[2] = 0;
- msg[3] = 0;
- msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
- msg[5] = 0; /* Segment 0 */
-
- /*
- * Provide three SGL-elements:
- * System table (SysTab), Private memory space declaration and
- * Private i/o space declaration
- */
- msg[6] = 0x54000000 | sys_tbl_len;
- msg[7] = (u32)sys_tbl_pa;
- msg[8] = 0x54000000 | 0;
- msg[9] = 0;
- msg[10] = 0xD4000000 | 0;
- msg[11] = 0;
-
- if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
- printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
- pHba->name, ret);
- }
-#ifdef DEBUG
- else {
- PINFO("%s: SysTab set.\n", pHba->name);
- }
-#endif
-
- return ret;
-}
-
-
-/*============================================================================
- *
- *============================================================================
- */
-
-
-#ifdef UARTDELAY
-
-static void adpt_delay(int millisec)
-{
- int i;
- for (i = 0; i < millisec; i++) {
- udelay(1000); /* delay for one millisecond */
- }
-}
-
-#endif
-
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = "dpt_i2o",
- .proc_name = "dpt_i2o",
- .show_info = adpt_show_info,
- .info = adpt_info,
- .queuecommand = adpt_queue,
- .eh_abort_handler = adpt_abort,
- .eh_device_reset_handler = adpt_device_reset,
- .eh_bus_reset_handler = adpt_bus_reset,
- .eh_host_reset_handler = adpt_reset,
- .bios_param = adpt_bios_param,
- .slave_configure = adpt_slave_configure,
- .can_queue = MAX_TO_IOP_MESSAGES,
- .this_id = 7,
-};
-
-static int __init adpt_init(void)
-{
- int error;
- adpt_hba *pHba, *next;
-
- printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
-
- error = adpt_detect(&driver_template);
- if (error < 0)
- return error;
- if (hba_chain == NULL)
- return -ENODEV;
-
- for (pHba = hba_chain; pHba; pHba = pHba->next) {
- error = scsi_add_host(pHba->host, &pHba->pDev->dev);
- if (error)
- goto fail;
- scsi_scan_host(pHba->host);
- }
- return 0;
-fail:
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- scsi_remove_host(pHba->host);
- }
- return error;
-}
-
-static void __exit adpt_exit(void)
-{
- adpt_hba *pHba, *next;
-
- for (pHba = hba_chain; pHba; pHba = next) {
- next = pHba->next;
- adpt_release(pHba);
- }
-}
-
-module_init(adpt_init);
-module_exit(adpt_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
deleted file mode 100644
index 8a079e8d7f65..000000000000
--- a/drivers/scsi/dpti.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/***************************************************************************
- dpti.h - description
- -------------------
- begin : Thu Sep 7 2000
- copyright : (C) 2001 by Adaptec
-
- See Documentation/scsi/dpti.rst for history, notes, license info
- and credits
- ***************************************************************************/
-
-/***************************************************************************
- * *
- * *
- ***************************************************************************/
-
-#ifndef _DPT_H
-#define _DPT_H
-
-#define MAX_TO_IOP_MESSAGES (255)
-#define MAX_FROM_IOP_MESSAGES (255)
-
-
-/*
- * SCSI interface function Prototypes
- */
-
-static int adpt_detect(struct scsi_host_template * sht);
-static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd);
-static int adpt_abort(struct scsi_cmnd * cmd);
-static int adpt_reset(struct scsi_cmnd* cmd);
-static int adpt_slave_configure(struct scsi_device *);
-
-static const char *adpt_info(struct Scsi_Host *pSHost);
-static int adpt_bios_param(struct scsi_device * sdev, struct block_device *dev,
- sector_t, int geom[]);
-
-static int adpt_bus_reset(struct scsi_cmnd* cmd);
-static int adpt_device_reset(struct scsi_cmnd* cmd);
-
-
-/*
- * struct scsi_host_template (see scsi/scsi_host.h)
- */
-
-#define DPT_DRIVER_NAME "Adaptec I2O RAID"
-
-#ifndef HOSTS_C
-
-#include "dpt/sys_info.h"
-#include <linux/wait.h>
-#include "dpt/dpti_i2o.h"
-#include "dpt/dpti_ioctl.h"
-
-#define DPT_I2O_VERSION "2.4 Build 5go"
-#define DPT_VERSION 2
-#define DPT_REVISION '4'
-#define DPT_SUBREVISION '5'
-#define DPT_BETA ""
-#define DPT_MONTH 8
-#define DPT_DAY 7
-#define DPT_YEAR (2001-1980)
-
-#define DPT_DRIVER "dpt_i2o"
-#define DPTI_I2O_MAJOR (151)
-#define DPT_ORGANIZATION_ID (0x1B) /* For Private Messages */
-#define DPTI_MAX_HBA (16)
-#define MAX_CHANNEL (5) // Maximum Channel # Supported
-#define MAX_ID (128) // Maximum Target ID Supported
-
-/* Sizes in 4 byte words */
-#define REPLY_FRAME_SIZE (17)
-#define MAX_MESSAGE_SIZE (128)
-#define SG_LIST_ELEMENTS (56)
-
-#define EMPTY_QUEUE 0xffffffff
-#define I2O_INTERRUPT_PENDING_B (0x08)
-
-#define PCI_DPT_VENDOR_ID (0x1044) // DPT PCI Vendor ID
-#define PCI_DPT_DEVICE_ID (0xA501) // DPT PCI I2O Device ID
-#define PCI_DPT_RAPTOR_DEVICE_ID (0xA511)
-
-/* Debugging macro from Linux Device Drivers - Rubini */
-#undef PDEBUG
-#ifdef DEBUG
-//TODO add debug level switch
-# define PDEBUG(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-# define PDEBUGV(fmt, args...) printk(KERN_DEBUG "dpti: " fmt, ##args)
-#else
-# define PDEBUG(fmt, args...) /* not debugging: nothing */
-# define PDEBUGV(fmt, args...) /* not debugging: nothing */
-#endif
-
-#define PERROR(fmt, args...) printk(KERN_ERR fmt, ##args)
-#define PWARN(fmt, args...) printk(KERN_WARNING fmt, ##args)
-#define PINFO(fmt, args...) printk(KERN_INFO fmt, ##args)
-#define PCRIT(fmt, args...) printk(KERN_CRIT fmt, ##args)
-
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM))
-
-// Command timeouts
-#define FOREVER (0)
-#define TMOUT_INQUIRY (20)
-#define TMOUT_FLUSH (360/45)
-#define TMOUT_ABORT (30)
-#define TMOUT_SCSI (300)
-#define TMOUT_IOPRESET (360)
-#define TMOUT_GETSTATUS (15)
-#define TMOUT_INITOUTBOUND (15)
-#define TMOUT_LCT (360)
-
-
-#define I2O_SCSI_DEVICE_DSC_MASK 0x00FF
-
-#define I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION 0x000A
-
-#define I2O_SCSI_DSC_MASK 0xFF00
-#define I2O_SCSI_DSC_SUCCESS 0x0000
-#define I2O_SCSI_DSC_REQUEST_ABORTED 0x0200
-#define I2O_SCSI_DSC_UNABLE_TO_ABORT 0x0300
-#define I2O_SCSI_DSC_COMPLETE_WITH_ERROR 0x0400
-#define I2O_SCSI_DSC_ADAPTER_BUSY 0x0500
-#define I2O_SCSI_DSC_REQUEST_INVALID 0x0600
-#define I2O_SCSI_DSC_PATH_INVALID 0x0700
-#define I2O_SCSI_DSC_DEVICE_NOT_PRESENT 0x0800
-#define I2O_SCSI_DSC_UNABLE_TO_TERMINATE 0x0900
-#define I2O_SCSI_DSC_SELECTION_TIMEOUT 0x0A00
-#define I2O_SCSI_DSC_COMMAND_TIMEOUT 0x0B00
-#define I2O_SCSI_DSC_MR_MESSAGE_RECEIVED 0x0D00
-#define I2O_SCSI_DSC_SCSI_BUS_RESET 0x0E00
-#define I2O_SCSI_DSC_PARITY_ERROR_FAILURE 0x0F00
-#define I2O_SCSI_DSC_AUTOSENSE_FAILED 0x1000
-#define I2O_SCSI_DSC_NO_ADAPTER 0x1100
-#define I2O_SCSI_DSC_DATA_OVERRUN 0x1200
-#define I2O_SCSI_DSC_UNEXPECTED_BUS_FREE 0x1300
-#define I2O_SCSI_DSC_SEQUENCE_FAILURE 0x1400
-#define I2O_SCSI_DSC_REQUEST_LENGTH_ERROR 0x1500
-#define I2O_SCSI_DSC_PROVIDE_FAILURE 0x1600
-#define I2O_SCSI_DSC_BDR_MESSAGE_SENT 0x1700
-#define I2O_SCSI_DSC_REQUEST_TERMINATED 0x1800
-#define I2O_SCSI_DSC_IDE_MESSAGE_SENT 0x3300
-#define I2O_SCSI_DSC_RESOURCE_UNAVAILABLE 0x3400
-#define I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT 0x3500
-#define I2O_SCSI_DSC_MESSAGE_RECEIVED 0x3600
-#define I2O_SCSI_DSC_INVALID_CDB 0x3700
-#define I2O_SCSI_DSC_LUN_INVALID 0x3800
-#define I2O_SCSI_DSC_SCSI_TID_INVALID 0x3900
-#define I2O_SCSI_DSC_FUNCTION_UNAVAILABLE 0x3A00
-#define I2O_SCSI_DSC_NO_NEXUS 0x3B00
-#define I2O_SCSI_DSC_SCSI_IID_INVALID 0x3C00
-#define I2O_SCSI_DSC_CDB_RECEIVED 0x3D00
-#define I2O_SCSI_DSC_LUN_ALREADY_ENABLED 0x3E00
-#define I2O_SCSI_DSC_BUS_BUSY 0x3F00
-#define I2O_SCSI_DSC_QUEUE_FROZEN 0x4000
-
-
-#ifndef TRUE
-#define TRUE 1
-#define FALSE 0
-#endif
-
-#define HBA_FLAGS_INSTALLED_B 0x00000001 // Adapter Was Installed
-#define HBA_FLAGS_BLINKLED_B 0x00000002 // Adapter In Blink LED State
-#define HBA_FLAGS_IN_RESET 0x00000040 /* in reset */
-#define HBA_HOSTRESET_FAILED 0x00000080 /* adpt_resethost failed */
-
-
-// Device state flags
-#define DPTI_DEV_ONLINE 0x00
-#define DPTI_DEV_UNSCANNED 0x01
-#define DPTI_DEV_RESET 0x02
-#define DPTI_DEV_OFFLINE 0x04
-
-
-struct adpt_device {
- struct adpt_device* next_lun;
- u32 flags;
- u32 type;
- u32 capacity;
- u32 block_size;
- u8 scsi_channel;
- u8 scsi_id;
- u64 scsi_lun;
- u8 state;
- u16 tid;
- struct i2o_device* pI2o_dev;
- struct scsi_device *pScsi_dev;
-};
-
-struct adpt_channel {
- struct adpt_device* device[MAX_ID]; /* used as an array of 128 scsi ids */
- u8 scsi_id;
- u8 type;
- u16 tid;
- u32 state;
- struct i2o_device* pI2o_dev;
-};
-
-// HBA state flags
-#define DPTI_STATE_RESET (0x01)
-
-typedef struct _adpt_hba {
- struct _adpt_hba *next;
- struct pci_dev *pDev;
- struct Scsi_Host *host;
- u32 state;
- spinlock_t state_lock;
- int unit;
- int host_no; /* SCSI host number */
- u8 initialized;
- u8 in_use; /* is the management node open*/
-
- char name[32];
- char detail[55];
-
- void __iomem *base_addr_virt;
- void __iomem *msg_addr_virt;
- ulong base_addr_phys;
- void __iomem *post_port;
- void __iomem *reply_port;
- void __iomem *irq_mask;
- u16 post_count;
- u32 post_fifo_size;
- u32 reply_fifo_size;
- u32* reply_pool;
- dma_addr_t reply_pool_pa;
- u32 sg_tablesize; // Scatter/Gather List Size.
- u8 top_scsi_channel;
- u8 top_scsi_id;
- u64 top_scsi_lun;
- u8 dma64;
-
- i2o_status_block* status_block;
- dma_addr_t status_block_pa;
- i2o_hrt* hrt;
- dma_addr_t hrt_pa;
- i2o_lct* lct;
- dma_addr_t lct_pa;
- uint lct_size;
- struct i2o_device* devices;
- struct adpt_channel channel[MAX_CHANNEL];
- struct proc_dir_entry* proc_entry; /* /proc dir */
-
- void __iomem *FwDebugBuffer_P; // Virtual Address Of FW Debug Buffer
- u32 FwDebugBufferSize; // FW Debug Buffer Size In Bytes
- void __iomem *FwDebugStrLength_P;// Virtual Addr Of FW Debug String Len
- void __iomem *FwDebugFlags_P; // Virtual Address Of FW Debug Flags
- void __iomem *FwDebugBLEDflag_P;// Virtual Addr Of FW Debug BLED
- void __iomem *FwDebugBLEDvalue_P;// Virtual Addr Of FW Debug BLED
- u32 FwDebugFlags;
- u32 *ioctl_reply_context[4];
-} adpt_hba;
-
-struct sg_simple_element {
- u32 flag_count;
- u32 addr_bus;
-};
-
-/*
- * Function Prototypes
- */
-
-static void adpt_i2o_sys_shutdown(void);
-static int adpt_init(void);
-static int adpt_i2o_build_sys_table(void);
-static irqreturn_t adpt_isr(int irq, void *dev_id);
-
-static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d);
-static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
- int group, int field, void *buf, int buflen);
-#ifdef DEBUG
-static const char *adpt_i2o_get_class_name(int class);
-#endif
-static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
- void *opblk, dma_addr_t opblk_pa, int oplen,
- void *resblk, dma_addr_t resblk_pa, int reslen);
-static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout);
-static int adpt_i2o_lct_get(adpt_hba* pHba);
-static int adpt_i2o_parse_lct(adpt_hba* pHba);
-static int adpt_i2o_activate_hba(adpt_hba* pHba);
-static int adpt_i2o_enable_hba(adpt_hba* pHba);
-static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d);
-static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len);
-static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba);
-static s32 adpt_i2o_status_get(adpt_hba* pHba);
-static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba);
-static s32 adpt_i2o_hrt_get(adpt_hba* pHba);
-static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* dptdevice);
-static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd);
-static s32 adpt_scsi_host_alloc(adpt_hba* pHba,struct scsi_host_template * sht);
-static s32 adpt_hba_reset(adpt_hba* pHba);
-static s32 adpt_i2o_reset_hba(adpt_hba* pHba);
-static s32 adpt_rescan(adpt_hba* pHba);
-static s32 adpt_i2o_reparse_lct(adpt_hba* pHba);
-static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
-static void adpt_i2o_delete_hba(adpt_hba* pHba);
-static void adpt_inquiry(adpt_hba* pHba);
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
-static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
-static int adpt_i2o_online_hba(adpt_hba* pHba);
-static void adpt_i2o_post_wait_complete(u32, int);
-static int adpt_i2o_systab_send(adpt_hba* pHba);
-
-static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg);
-static int adpt_open(struct inode *inode, struct file *file);
-static int adpt_close(struct inode *inode, struct file *file);
-
-
-#ifdef UARTDELAY
-static void adpt_delay(int millisec);
-#endif
-
-#define PRINT_BUFFER_SIZE 512
-
-#define HBA_FLAGS_DBG_FLAGS_MASK 0xffff0000 // Mask for debug flags
-#define HBA_FLAGS_DBG_KERNEL_PRINT_B 0x00010000 // Kernel Debugger Print
-#define HBA_FLAGS_DBG_FW_PRINT_B 0x00020000 // Firmware Debugger Print
-#define HBA_FLAGS_DBG_FUNCTION_ENTRY_B 0x00040000 // Function Entry Point
-#define HBA_FLAGS_DBG_FUNCTION_EXIT_B 0x00080000 // Function Exit
-#define HBA_FLAGS_DBG_ERROR_B 0x00100000 // Error Conditions
-#define HBA_FLAGS_DBG_INIT_B 0x00200000 // Init Prints
-#define HBA_FLAGS_DBG_OS_COMMANDS_B 0x00400000 // OS Command Info
-#define HBA_FLAGS_DBG_SCAN_B 0x00800000 // Device Scan
-
-#define FW_DEBUG_STR_LENGTH_OFFSET 0
-#define FW_DEBUG_FLAGS_OFFSET 4
-#define FW_DEBUG_BLED_OFFSET 8
-
-#define FW_DEBUG_FLAGS_NO_HEADERS_B 0x01
-#endif /* !HOSTS_C */
-#endif /* _DPT_H */
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index b2b61bc45f12..b08fc8839808 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -261,7 +261,7 @@ efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
dma.size = FW_WRITE_BUFSIZE;
dma.virt = dma_alloc_coherent(&efct->pci->dev,
- dma.size, &dma.phys, GFP_DMA);
+ dma.size, &dma.phys, GFP_KERNEL);
if (!dma.virt)
return -ENOMEM;
@@ -541,13 +541,10 @@ efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, efct);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
- dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
- rc = -1;
- goto dma_mask_out;
- }
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
+ goto dma_mask_out;
}
num_interrupts = efct_device_interrupts_required(efct);
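
Both efct_driver.c hunks above follow from one rule: once
dma_set_mask_and_coherent() has succeeded, dma_alloc_coherent() already
returns memory the device can address, so the GFP_DMA zone hint is
redundant; the GFP_DMA-to-GFP_KERNEL conversions repeated through the files
below are the same fix. A minimal probe-time sketch (hypothetical function
and size):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/sizes.h>

static int example_probe(struct pci_dev *pdev)
{
	dma_addr_t phys;
	void *virt;
	int rc;

	/* fail probe if the 64-bit mask cannot be set (as above) */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	/* the mask constrains the allocation; no GFP_DMA needed */
	virt = dma_alloc_coherent(&pdev->dev, SZ_4K, &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	return 0;
}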
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index ba8256b4c782..5a5525054d71 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -516,7 +516,7 @@ efct_hw_setup_io(struct efct_hw *hw)
dma = &hw->xfer_rdy;
dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
dma->virt = dma_alloc_coherent(&efct->pci->dev,
- dma->size, &dma->phys, GFP_DMA);
+ dma->size, &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -ENOMEM;
}
@@ -562,7 +562,7 @@ efct_hw_setup_io(struct efct_hw *hw)
sizeof(struct sli4_sge);
dma->virt = dma_alloc_coherent(&efct->pci->dev,
dma->size, &dma->phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!dma->virt) {
efc_log_err(hw->os, "dma_alloc fail %d\n", i);
memset(&io->def_sgl, 0,
@@ -618,7 +618,7 @@ efct_hw_init_prereg_io(struct efct_hw *hw)
memset(&req, 0, sizeof(struct efc_dma));
req.size = 32 + sgls_per_request * 16;
req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!req.virt) {
kfree(sgls);
return -ENOMEM;
@@ -1063,7 +1063,7 @@ efct_hw_init(struct efct_hw *hw)
dma = &hw->loop_map;
dma->size = SLI4_MIN_LOOP_MAP_BYTES;
dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -1192,7 +1192,7 @@ efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
prq->dma.size,
&prq->dma.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!prq->dma.virt) {
efc_log_err(hw->os, "DMA allocation failed\n");
kfree(rq_buf);
@@ -1402,7 +1402,6 @@ efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
mutex_lock(&hw->bmbx_lock);
bmbx = hw->sli.bmbx.virt;
- memset(bmbx, 0, SLI4_BMBX_SIZE);
memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
if (sli_bmbx_command(&hw->sli) == 0) {
diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c
index 71e21655916a..c612f0a48839 100644
--- a/drivers/scsi/elx/efct/efct_io.c
+++ b/drivers/scsi/elx/efct/efct_io.c
@@ -48,7 +48,7 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
io->rspbuf.size,
- &io->rspbuf.phys, GFP_DMA);
+ &io->rspbuf.phys, GFP_KERNEL);
if (!io->rspbuf.virt) {
efc_log_err(efct, "dma_alloc rspbuf failed\n");
efct_io_pool_free(io_pool);
@@ -62,7 +62,6 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
return NULL;
}
- memset(io->sgl, 0, sizeof(*io->sgl) * num_sgl);
io->sgl_allocated = num_sgl;
io->sgl_count = 0;
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index 8b004a5818d6..be4b5c1ee32d 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -370,9 +370,6 @@ static int efct_lio_get_cmd_state(struct se_cmd *cmd)
container_of(cmd, struct efct_scsi_tgt_io, cmd);
struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
- if (!io)
- return 0;
-
return io->tgt_io.state;
}
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
index f8665d48904a..da4ac8a4ce12 100644
--- a/drivers/scsi/elx/libefc/efc_cmds.c
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -179,7 +179,7 @@ efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
nport->dma.size = EFC_SPARAM_DMA_SZ;
nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
nport->dma.size, &nport->dma.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!nport->dma.virt) {
efc_log_err(efc, "Failed to allocate DMA memory\n");
efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
@@ -466,7 +466,7 @@ efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
domain->dma.size = EFC_SPARAM_DMA_SZ;
domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
domain->dma.size,
- &domain->dma.phys, GFP_DMA);
+ &domain->dma.phys, GFP_KERNEL);
if (!domain->dma.virt) {
efc_log_err(efc, "Failed to allocate DMA memory\n");
return -EIO;
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
index 24db0accb256..84bc81d7ce76 100644
--- a/drivers/scsi/elx/libefc/efc_els.c
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -46,18 +46,14 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
efc = node->efc;
- spin_lock_irqsave(&node->els_ios_lock, flags);
-
if (!node->els_io_enabled) {
efc_log_err(efc, "els io alloc disabled\n");
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
if (!els) {
atomic_add_return(1, &efc->els_io_alloc_failed_count);
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
@@ -71,16 +67,15 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
/* now allocate DMA for request and response */
els->io.req.size = reqlen;
els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
- &els->io.req.phys, GFP_DMA);
+ &els->io.req.phys, GFP_KERNEL);
if (!els->io.req.virt) {
mempool_free(els, efc->els_io_pool);
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
els->io.rsp.size = rsplen;
els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
- &els->io.rsp.phys, GFP_DMA);
+ &els->io.rsp.phys, GFP_KERNEL);
if (!els->io.rsp.virt) {
dma_free_coherent(&efc->pci->dev, els->io.req.size,
els->io.req.virt, els->io.req.phys);
@@ -94,10 +89,11 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
/* add els structure to ELS IO list */
INIT_LIST_HEAD(&els->list_entry);
+ spin_lock_irqsave(&node->els_ios_lock, flags);
list_add_tail(&els->list_entry, &node->els_ios_list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
}
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return els;
}
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 907d67aeac23..b8c048cdb17f 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -445,7 +445,7 @@ sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
dma->size = payload_size;
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
- &dma->phys, GFP_DMA);
+ &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -508,7 +508,7 @@ __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
q->dma.size = size * n_entries;
q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
- &q->dma.phys, GFP_DMA);
+ &q->dma.phys, GFP_KERNEL);
if (!q->dma.virt) {
memset(&q->dma, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
@@ -849,7 +849,7 @@ static int sli_cmd_cq_set_create(struct sli4 *sli4,
dma->size = payload_size;
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
- &dma->phys, GFP_DMA);
+ &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -4127,7 +4127,7 @@ sli_calc_max_qentries(struct sli4 *sli4)
sli4->qinfo.count_mask[q]);
}
- /* single, continguous DMA allocations will be called for each queue
+ /* single, contiguous DMA allocations will be called for each queue
* of size (max_qentries * queue entry size); since these can be large,
* check against the OS max DMA allocation size
*/
@@ -4413,7 +4413,7 @@ sli_get_ctrl_attributes(struct sli4 *sli4)
psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
data.size = psize;
data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
- &data.phys, GFP_DMA);
+ &data.phys, GFP_KERNEL);
if (!data.virt) {
memset(&data, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
@@ -4653,7 +4653,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
*/
sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
- &sli4->bmbx.phys, GFP_DMA);
+ &sli4->bmbx.phys, GFP_KERNEL);
if (!sli4->bmbx.virt) {
memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
@@ -4674,7 +4674,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
sli4->vpd_data.size,
&sli4->vpd_data.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!sli4->vpd_data.virt) {
memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
/* Note that failure isn't fatal in this specific case */
@@ -5070,7 +5070,7 @@ sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
payload_dma->size = payload_size;
payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
payload_dma->size,
- &payload_dma->phys, GFP_DMA);
+ &payload_dma->phys, GFP_KERNEL);
if (!payload_dma->virt) {
memset(payload_dma, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "mbox payload memory allocation fail\n");
diff --git a/drivers/scsi/elx/libefc_sli/sli4.h b/drivers/scsi/elx/libefc_sli/sli4.h
index ee2a9e65a88d..38af166cc786 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.h
+++ b/drivers/scsi/elx/libefc_sli/sli4.h
@@ -609,7 +609,7 @@ struct sli4_rqst_cmn_create_cq_v2 {
__le16 cqe_count;
__le16 rsvd30;
__le32 rsvd32;
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
enum sli4_create_cqset_e {
@@ -634,7 +634,7 @@ struct sli4_rqst_cmn_create_cq_set_v0 {
__le16 num_cq_req;
__le16 dw6w1_flags;
__le16 eq_id[16];
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
/* CQE count */
@@ -764,7 +764,7 @@ struct sli4_rqst_cmn_create_mq_ext {
__le32 dw7_val;
__le32 dw8_flags;
__le32 rsvd36;
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
struct sli4_rsp_cmn_create_mq_ext {
@@ -802,7 +802,7 @@ struct sli4_rqst_cmn_create_cq_v0 {
__le32 dw6_flags;
__le32 rsvd28;
__le32 rsvd32;
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
enum sli4_create_rq_e {
@@ -887,7 +887,7 @@ struct sli4_rqst_rq_create_v2 {
__le16 base_cq_id;
__le16 rsvd26;
__le32 rsvd42;
- struct sli4_dmaaddr page_phys_addr[0];
+ struct sli4_dmaaddr page_phys_addr[];
};
struct sli4_rsp_rq_create_v2 {
@@ -3168,7 +3168,7 @@ struct sli4_rqst_cmn_read_object {
__le32 read_offset;
u8 object_name[104];
__le32 host_buffer_descriptor_count;
- struct sli4_bde host_buffer_descriptor[0];
+ struct sli4_bde host_buffer_descriptor[];
};
#define RSP_COM_READ_OBJ_EOF 0x80000000
@@ -3191,7 +3191,7 @@ struct sli4_rqst_cmn_write_object {
__le32 write_offset;
u8 object_name[104];
__le32 host_buffer_descriptor_count;
- struct sli4_bde host_buffer_descriptor[0];
+ struct sli4_bde host_buffer_descriptor[];
};
#define RSP_CHANGE_STATUS 0xff
@@ -3217,7 +3217,7 @@ struct sli4_rqst_cmn_read_object_list {
__le32 read_offset;
u8 object_name[104];
__le32 host_buffer_descriptor_count;
- struct sli4_bde host_buffer_descriptor[0];
+ struct sli4_bde host_buffer_descriptor[];
};
enum sli4_rqst_set_dump_flags {
@@ -3342,7 +3342,7 @@ struct sli4_rspource_descriptor_v1 {
u8 descriptor_type;
u8 descriptor_length;
__le16 rsvd16;
- __le32 type_specific[0];
+ __le32 type_specific[];
};
enum sli4_pcie_desc_flags {
@@ -3474,7 +3474,7 @@ struct sli4_rqst_post_hdr_templates {
struct sli4_rqst_hdr hdr;
__le16 rpi_offset;
__le16 page_count;
- struct sli4_dmaaddr page_descriptor[0];
+ struct sli4_dmaaddr page_descriptor[];
};
#define SLI4_HDR_TEMPLATE_SIZE 64
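
The [0]-to-[] conversions above swap the old GNU zero-length-array idiom
for a C99 flexible array member, which lets the compiler and the fortified
string helpers see the real object bounds. Allocation usually pairs it with
struct_size() from <linux/overflow.h>; a sketch with a hypothetical wrapper
struct (sli4_dmaaddr is the real element type):

#include <linux/overflow.h>
#include <linux/slab.h>

struct page_list {			/* hypothetical example struct */
	__le16 page_count;
	__le16 rsvd;
	struct sli4_dmaaddr page_phys_addr[];	/* flexible array member */
};

static struct page_list *page_list_alloc(u16 n)
{
	/* struct_size() = header + n trailing elements, overflow-checked */
	struct page_list *pl = kzalloc(struct_size(pl, page_phys_addr, n),
				       GFP_KERNEL);

	if (pl)
		pl->page_count = cpu_to_le16(n);
	return pl;
}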
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
index ff2ad9b38575..dd3437412ffc 100644
--- a/drivers/scsi/esas2r/atioctl.h
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -831,6 +831,7 @@ struct __packed atto_hba_trace {
u32 total_length;
u32 trace_mask;
u8 reserved2[48];
+ u8 contents[];
};
#define ATTO_FUNC_SCSI_PASS_THRU 0x04
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index 429d64299fe9..f910e2553fbb 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -232,7 +232,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
*/
rq->req_stat = RS_PENDING;
if (test_bit(AF_DEGRADED_MODE, &a->flags))
- /* not suppported for now */;
+ /* not supported for now */;
else
build_flash_msg(a, rq);
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 08f4e43c7d9e..e003d923acbf 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -947,10 +947,9 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,
break;
}
- memcpy(trc + 1,
+ memcpy(trc->contents,
a->fw_coredump_buff + offset,
len);
-
hi->data_length = len;
} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
memset(a->fw_coredump_buff, 0,
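
The old memcpy(trc + 1, ...) relied on pointer arithmetic landing just past
the header; the new contents[] member (added to atto_hba_trace above) names
those same bytes, so the destination and its bounds are explicit. A sketch
of the difference with an abbreviated hypothetical header:

#include <linux/string.h>
#include <linux/types.h>

struct trace_hdr {		/* abbreviated stand-in for atto_hba_trace */
	u32 total_length;
	u8  contents[];		/* payload begins here */
};

static void copy_trace(struct trace_hdr *trc, const u8 *src, size_t len)
{
	/* old form: memcpy(trc + 1, src, len) -- same address, opaque */
	memcpy(trc->contents, src, len);
}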
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 57787537285a..64ec6bb84550 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -2678,6 +2678,7 @@ struct scsi_host_template scsi_esp_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.skip_settle_delay = 1,
+ .cmd_size = sizeof(struct esp_cmd_priv),
};
EXPORT_SYMBOL(scsi_esp_template);
@@ -2739,9 +2740,6 @@ static struct spi_function_template esp_transport_ops = {
static int __init esp_init(void)
{
- BUILD_BUG_ON(sizeof(struct scsi_pointer) <
- sizeof(struct esp_cmd_priv));
-
esp_transport_template = spi_attach_transport(&esp_transport_ops);
if (!esp_transport_template)
return -ENODEV;
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 446a3d18c022..c73760d3cf83 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -262,7 +262,8 @@ struct esp_cmd_priv {
struct scatterlist *cur_sg;
int tot_residue;
};
-#define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp))
+
+#define ESP_CMD_PRIV(cmd) ((struct esp_cmd_priv *)scsi_cmd_priv(cmd))
/* NOTE: this enum is ordered based on chip features! */
enum esp_rev {
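
These hunks are part of the tree-wide removal of scsi_cmnd::SCp: a driver
now declares how much per-command private space it needs via cmd_size, and
the midlayer allocates it immediately after each scsi_cmnd, reachable
through scsi_cmd_priv(). A minimal sketch with hypothetical driver names:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct mydrv_cmd_priv {		/* hypothetical per-command state */
	int phase;
	int tot_residue;
};

static struct mydrv_cmd_priv *mydrv_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);	/* data lives just past *cmd */
}

static struct scsi_host_template mydrv_template = {
	/* midlayer allocates this much extra space per command */
	.cmd_size = sizeof(struct mydrv_cmd_priv),
};

fdomain (further down) takes the same route but keeps a plain
struct scsi_pointer as its private type, so its conversion is essentially
s/cmd->SCp./fdomain_scsi_pointer(cmd)->/.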
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 6415f88738ad..6ec296321ffc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -277,6 +277,7 @@ static struct scsi_host_template fcoe_shost_template = {
.sg_tablesize = SG_ALL,
.max_sectors = 0xffff,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct libfc_cmd_priv),
};
/**
@@ -666,7 +667,7 @@ static void fcoe_netdev_features_change(struct fc_lport *lport,
if (netdev->features & NETIF_F_FSO) {
lport->seq_offload = 1;
- lport->lso_max = netdev->gso_max_size;
+ lport->lso_max = min(netdev->gso_max_size, GSO_LEGACY_MAX_SIZE);
FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
lport->lso_max);
} else {
@@ -1433,8 +1434,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
return NET_RX_SUCCESS;
err:
- per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
- put_cpu();
+ this_cpu_inc(lport->stats->ErrorFrames);
err2:
kfree_skb(skb);
return NET_RX_DROP;
@@ -1452,9 +1452,10 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
struct fcoe_percpu_s *fps;
int rc;
- fps = &get_cpu_var(fcoe_percpu);
+ local_lock(&fcoe_percpu.lock);
+ fps = this_cpu_ptr(&fcoe_percpu);
rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
- put_cpu_var(fcoe_percpu);
+ local_unlock(&fcoe_percpu.lock);
return rc;
}
@@ -1473,7 +1474,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct ethhdr *eh;
struct fcoe_crc_eof *cp;
struct sk_buff *skb;
- struct fc_stats *stats;
struct fc_frame_header *fh;
unsigned int hlen; /* header length implies the version */
unsigned int tlen; /* trailer length */
@@ -1488,7 +1488,6 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
- wlen = skb->len / FCOE_WORD_TO_BYTE;
if (!lport->link_up) {
kfree_skb(skb);
@@ -1584,10 +1583,8 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_shinfo(skb)->gso_size = 0;
}
/* update tx stats: regardless if LLD fails */
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->TxFrames++;
- stats->TxWords += wlen;
- put_cpu();
+ this_cpu_inc(lport->stats->TxFrames);
+ this_cpu_add(lport->stats->TxWords, wlen);
/* send down to lld */
fr_dev(fp) = lport;
@@ -1609,7 +1606,6 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
- struct fc_stats *stats;
/*
* We only check CRC if no offload is available and if it is
@@ -1639,11 +1635,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
}
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->InvalidCRCCount++;
- if (stats->InvalidCRCCount < 5)
+ if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
- put_cpu();
return -EINVAL;
}
@@ -1656,7 +1649,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
u32 fr_len;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
- struct fc_stats *stats;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_hdr *hp;
@@ -1684,9 +1676,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
- stats = per_cpu_ptr(lport->stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
- if (stats->ErrorFrames < 5)
+ struct fc_stats *stats;
+
+ stats = per_cpu_ptr(lport->stats, raw_smp_processor_id());
+ if (READ_ONCE(stats->ErrorFrames) < 5)
printk(KERN_WARNING "fcoe: FCoE version "
"mismatch: The frame has "
"version %x, but the "
@@ -1699,8 +1693,8 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ this_cpu_inc(lport->stats->RxFrames);
+ this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE);
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
@@ -1716,13 +1710,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
- put_cpu();
fc_exch_recv(lport, fp);
return;
}
drop:
- stats->ErrorFrames++;
- put_cpu();
+ this_cpu_inc(lport->stats->ErrorFrames);
kfree_skb(skb);
}
@@ -1846,7 +1838,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
- struct fc_stats *stats;
u32 link_possible = 1;
u32 mfs;
int rc = NOTIFY_OK;
@@ -1920,9 +1911,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case FCOE_CTLR_ENABLED:
case FCOE_CTLR_UNUSED:
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->LinkFailureCount++;
- put_cpu();
+ this_cpu_inc(lport->stats->LinkFailureCount);
fcoe_clean_pending_queue(lport);
}
}
@@ -2487,6 +2476,7 @@ static int __init fcoe_init(void)
p = per_cpu_ptr(&fcoe_percpu, cpu);
INIT_WORK(&p->work, fcoe_receive_work);
skb_queue_head_init(&p->fcoe_rx_list);
+ local_lock_init(&p->lock);
}
/* Setup link change notification */
@@ -2579,7 +2569,7 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
/* pre-FIP */
if (is_zero_ether_addr(mac))
fcoe_ctlr_recv_flogi(fip, lport, fp);
- if (!is_zero_ether_addr(mac))
+ else
fcoe_update_src_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
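
Every stats hunk in this file is the same mechanical conversion:
this_cpu_inc()/this_cpu_add() perform the per-CPU update as one
preemption-safe operation, so the get_cpu()/put_cpu() bracket (which
disabled preemption around the whole sequence) can go away. Side-by-side
sketch using the fields touched above:

#include <linux/percpu.h>
#include <scsi/libfc.h>

static void count_tx(struct fc_lport *lport, u32 wlen)
{
	/* old: stats = per_cpu_ptr(lport->stats, get_cpu());
	 *      stats->TxFrames++;
	 *      stats->TxWords += wlen;
	 *      put_cpu();
	 */
	this_cpu_inc(lport->stats->TxFrames);
	this_cpu_add(lport->stats->TxWords, wlen);
}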
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 1756a0ac6f08..ddc048069af2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -824,22 +824,21 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long deadline;
unsigned long sel_time = 0;
struct list_head del_list;
- struct fc_stats *stats;
INIT_LIST_HEAD(&del_list);
- stats = per_cpu_ptr(fip->lp->stats, get_cpu());
-
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
if (fip->sel_fcf == fcf) {
if (time_after(jiffies, deadline)) {
- stats->MissDiscAdvCount++;
+ u64 miss_cnt;
+
+ miss_cnt = this_cpu_inc_return(fip->lp->stats->MissDiscAdvCount);
printk(KERN_INFO "libfcoe: host%d: "
"Missing Discovery Advertisement "
"for fab %16.16llx count %lld\n",
fip->lp->host->host_no, fcf->fabric_name,
- stats->MissDiscAdvCount);
+ miss_cnt);
} else if (time_after(next_timer, deadline))
next_timer = deadline;
}
@@ -855,7 +854,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
*/
list_del(&fcf->list);
list_add(&fcf->list, &del_list);
- stats->VLinkFailureCount++;
+ this_cpu_inc(fip->lp->stats->VLinkFailureCount);
} else {
if (time_after(next_timer, deadline))
next_timer = deadline;
@@ -864,7 +863,6 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
sel_time = fcf->time;
}
}
- put_cpu();
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
@@ -1142,7 +1140,6 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fip_desc *desc;
struct fip_encaps *els;
struct fcoe_fcf *sel;
- struct fc_stats *stats;
enum fip_desc_type els_dtype = 0;
u8 els_op;
u8 sub;
@@ -1286,10 +1283,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
fr_dev(fp) = lport;
fr_encaps(fp) = els_dtype;
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->RxFrames++;
- stats->RxWords += skb->len / FIP_BPW;
- put_cpu();
+ this_cpu_inc(lport->stats->RxFrames);
+ this_cpu_add(lport->stats->RxWords, skb->len / FIP_BPW);
fc_exch_recv(lport, fp);
return;
@@ -1427,9 +1422,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
ntoh24(vp->fd_fc_id));
if (vn_port && (vn_port == lport)) {
mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->stats,
- get_cpu())->VLinkFailureCount++;
- put_cpu();
+ this_cpu_inc(lport->stats->VLinkFailureCount);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
}
@@ -1457,8 +1450,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* followed by physical port
*/
mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
- put_cpu();
+ this_cpu_inc(lport->stats->VLinkFailureCount);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
@@ -1969,7 +1961,7 @@ EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
*
* Returns: u64 fc world wide name
*/
-u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+u64 fcoe_wwn_from_mac(unsigned char mac[ETH_ALEN],
unsigned int scheme, unsigned int port)
{
u64 wwn;
@@ -2241,7 +2233,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
fip->probe_tries++;
- wait = prandom_u32() % FIP_VN_PROBE_WAIT;
+ wait = prandom_u32_max(FIP_VN_PROBE_WAIT);
} else
wait = FIP_VN_RLIM_INT;
mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
@@ -3133,7 +3125,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
fcoe_all_vn2vn, 0);
fip->port_ka_time = jiffies +
msecs_to_jiffies(FIP_VN_BEACON_INT +
- (prandom_u32() % FIP_VN_BEACON_FUZZ));
+ prandom_u32_max(FIP_VN_BEACON_FUZZ));
}
if (time_before(fip->port_ka_time, next_time))
next_time = fip->port_ka_time;
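
prandom_u32_max(range) replaces the open-coded prandom_u32() % range. At
the time of this series it scaled the random word by a multiply-and-shift,
which is cheaper than a division; roughly:

#include <linux/prandom.h>

/* sketch of how prandom_u32_max() maps a u32 into [0, ep_ro) */
static inline u32 prandom_u32_max_sketch(u32 ep_ro)
{
	return (u32)(((u64)prandom_u32() * ep_ro) >> 32);
}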
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 4d0e19e7c84b..62341c6353a7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -183,9 +183,9 @@ void __fcoe_get_lesb(struct fc_lport *lport,
memset(lesb, 0, sizeof(*lesb));
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(lport->stats, cpu);
- lfc += stats->LinkFailureCount;
- vlfc += stats->VLinkFailureCount;
- mdac += stats->MissDiscAdvCount;
+ lfc += READ_ONCE(stats->LinkFailureCount);
+ vlfc += READ_ONCE(stats->VLinkFailureCount);
+ mdac += READ_ONCE(stats->MissDiscAdvCount);
}
lesb->lesb_link_fail = htonl(lfc);
lesb->lesb_vlink_fail = htonl(vlfc);
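
With the writers converted to this_cpu_*, the unlocked reader side marks
each load with READ_ONCE() so the compiler emits one plain load per counter
instead of re-reading or fusing them; the result is still only a
best-effort snapshot. Sketch of the summation pattern used above:

#include <linux/percpu.h>
#include <scsi/libfc.h>

static u64 sum_link_failures(struct fc_lport *lport)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct fc_stats *s = per_cpu_ptr(lport->stats, cpu);

		total += READ_ONCE(s->LinkFailureCount);
	}
	return total;	/* snapshot, not an atomic sum */
}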
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 9159b4057c5d..444eac9b2466 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -115,6 +115,11 @@ struct fdomain {
struct work_struct work;
};
+static struct scsi_pointer *fdomain_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline void fdomain_make_bus_idle(struct fdomain *fd)
{
outb(0, fd->base + REG_BCTL);
@@ -263,20 +268,21 @@ static void fdomain_work(struct work_struct *work)
struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host,
hostdata);
struct scsi_cmnd *cmd = fd->cur_cmd;
+ struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
unsigned long flags;
int status;
int done = 0;
spin_lock_irqsave(sh->host_lock, flags);
- if (cmd->SCp.phase & in_arbitration) {
+ if (scsi_pointer->phase & in_arbitration) {
status = inb(fd->base + REG_ASTAT);
if (!(status & ASTAT_ARB)) {
set_host_byte(cmd, DID_BUS_BUSY);
fdomain_finish_cmd(fd);
goto out;
}
- cmd->SCp.phase = in_selection;
+ scsi_pointer->phase = in_selection;
outb(ICTL_SEL | FIFO_COUNT, fd->base + REG_ICTL);
outb(BCTL_BUSEN | BCTL_SEL, fd->base + REG_BCTL);
@@ -285,7 +291,7 @@ static void fdomain_work(struct work_struct *work)
/* Stop arbitration and enable parity */
outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
goto out;
- } else if (cmd->SCp.phase & in_selection) {
+ } else if (scsi_pointer->phase & in_selection) {
status = inb(fd->base + REG_BSTAT);
if (!(status & BSTAT_BSY)) {
/* Try again, for slow devices */
@@ -297,75 +303,75 @@ static void fdomain_work(struct work_struct *work)
/* Stop arbitration and enable parity */
outb(ACTL_IRQEN | PARITY_MASK, fd->base + REG_ACTL);
}
- cmd->SCp.phase = in_other;
+ scsi_pointer->phase = in_other;
outb(ICTL_FIFO | ICTL_REQ | FIFO_COUNT, fd->base + REG_ICTL);
outb(BCTL_BUSEN, fd->base + REG_BCTL);
goto out;
}
- /* cur_cmd->SCp.phase == in_other: this is the body of the routine */
+ /* fdomain_scsi_pointer(cur_cmd)->phase == in_other: this is the body of the routine */
status = inb(fd->base + REG_BSTAT);
if (status & BSTAT_REQ) {
switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) {
case BSTAT_CMD: /* COMMAND OUT */
- outb(cmd->cmnd[cmd->SCp.sent_command++],
+ outb(cmd->cmnd[scsi_pointer->sent_command++],
fd->base + REG_SCSI_DATA);
break;
case 0: /* DATA OUT -- tmc18c50/tmc18c30 only */
- if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
- cmd->SCp.have_data_in = -1;
+ if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) {
+ scsi_pointer->have_data_in = -1;
outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
PARITY_MASK, fd->base + REG_ACTL);
}
break;
case BSTAT_IO: /* DATA IN -- tmc18c50/tmc18c30 only */
- if (fd->chip != tmc1800 && !cmd->SCp.have_data_in) {
- cmd->SCp.have_data_in = 1;
+ if (fd->chip != tmc1800 && !scsi_pointer->have_data_in) {
+ scsi_pointer->have_data_in = 1;
outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
fd->base + REG_ACTL);
}
break;
case BSTAT_CMD | BSTAT_IO: /* STATUS IN */
- cmd->SCp.Status = inb(fd->base + REG_SCSI_DATA);
+ scsi_pointer->Status = inb(fd->base + REG_SCSI_DATA);
break;
case BSTAT_MSG | BSTAT_CMD: /* MESSAGE OUT */
outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA);
break;
case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */
- cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
- if (cmd->SCp.Message == COMMAND_COMPLETE)
+ scsi_pointer->Message = inb(fd->base + REG_SCSI_DATA);
+ if (scsi_pointer->Message == COMMAND_COMPLETE)
++done;
break;
}
}
- if (fd->chip == tmc1800 && !cmd->SCp.have_data_in &&
- cmd->SCp.sent_command >= cmd->cmd_len) {
+ if (fd->chip == tmc1800 && !scsi_pointer->have_data_in &&
+ scsi_pointer->sent_command >= cmd->cmd_len) {
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
- cmd->SCp.have_data_in = -1;
+ scsi_pointer->have_data_in = -1;
outb(ACTL_IRQEN | ACTL_FIFOWR | ACTL_FIFOEN |
PARITY_MASK, fd->base + REG_ACTL);
} else {
- cmd->SCp.have_data_in = 1;
+ scsi_pointer->have_data_in = 1;
outb(ACTL_IRQEN | ACTL_FIFOEN | PARITY_MASK,
fd->base + REG_ACTL);
}
}
- if (cmd->SCp.have_data_in == -1) /* DATA OUT */
+ if (scsi_pointer->have_data_in == -1) /* DATA OUT */
fdomain_write_data(cmd);
- if (cmd->SCp.have_data_in == 1) /* DATA IN */
+ if (scsi_pointer->have_data_in == 1) /* DATA IN */
fdomain_read_data(cmd);
if (done) {
- set_status_byte(cmd, cmd->SCp.Status);
+ set_status_byte(cmd, scsi_pointer->Status);
set_host_byte(cmd, DID_OK);
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
fdomain_finish_cmd(fd);
} else {
- if (cmd->SCp.phase & disconnect) {
+ if (scsi_pointer->phase & disconnect) {
outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,
fd->base + REG_ICTL);
outb(0, fd->base + REG_BCTL);
@@ -398,14 +404,15 @@ static irqreturn_t fdomain_irq(int irq, void *dev_id)
static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = fdomain_scsi_pointer(cmd);
struct fdomain *fd = shost_priv(cmd->device->host);
unsigned long flags;
- cmd->SCp.Status = 0;
- cmd->SCp.Message = 0;
- cmd->SCp.have_data_in = 0;
- cmd->SCp.sent_command = 0;
- cmd->SCp.phase = in_arbitration;
+ scsi_pointer->Status = 0;
+ scsi_pointer->Message = 0;
+ scsi_pointer->have_data_in = 0;
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->phase = in_arbitration;
scsi_set_resid(cmd, scsi_bufflen(cmd));
spin_lock_irqsave(sh->host_lock, flags);
@@ -440,7 +447,7 @@ static int fdomain_abort(struct scsi_cmnd *cmd)
spin_lock_irqsave(sh->host_lock, flags);
fdomain_make_bus_idle(fd);
- fd->cur_cmd->SCp.phase |= aborted;
+ fdomain_scsi_pointer(fd->cur_cmd)->phase |= aborted;
/* Aborts are not done well. . . */
set_host_byte(fd->cur_cmd, DID_ABORT);
@@ -501,6 +508,7 @@ static struct scsi_host_template fdomain_template = {
.this_id = 7,
.sg_tablesize = 64,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
struct Scsi_Host *fdomain_create(int base, int irq, int this_id,
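The fdomain hunks above rely on a fdomain_scsi_pointer() accessor introduced elsewhere in this series. Together with the new .cmd_size member it presumably maps a command to the struct scsi_pointer that the midlayer now allocates behind every struct scsi_cmnd, along these lines (a sketch for context, not part of the hunk):

    /* Presumed shape of the accessor used by the hunks above. */
    static inline struct scsi_pointer *fdomain_scsi_pointer(struct scsi_cmnd *cmd)
    {
            return scsi_cmd_priv(cmd);
    }

Because the template sets .cmd_size = sizeof(struct scsi_pointer), the SCSI core reserves that much driver-private space per command, so the accessor needs no allocation of its own.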
diff --git a/drivers/scsi/fnic/cq_desc.h b/drivers/scsi/fnic/cq_desc.h
index d1225cf6320e..0eb4ba277264 100644
--- a/drivers/scsi/fnic/cq_desc.h
+++ b/drivers/scsi/fnic/cq_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
diff --git a/drivers/scsi/fnic/cq_enet_desc.h b/drivers/scsi/fnic/cq_enet_desc.h
index a9fa26f82ddd..b6113291cf68 100644
--- a/drivers/scsi/fnic/cq_enet_desc.h
+++ b/drivers/scsi/fnic/cq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
diff --git a/drivers/scsi/fnic/cq_exch_desc.h b/drivers/scsi/fnic/cq_exch_desc.h
index 501660cfe228..4d94329c8ef5 100644
--- a/drivers/scsi/fnic/cq_exch_desc.h
+++ b/drivers/scsi/fnic/cq_exch_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _CQ_EXCH_DESC_H_
#define _CQ_EXCH_DESC_H_
diff --git a/drivers/scsi/fnic/fcpio.h b/drivers/scsi/fnic/fcpio.h
index 12d770d885c5..54a0b2ba8f6f 100644
--- a/drivers/scsi/fnic/fcpio.h
+++ b/drivers/scsi/fnic/fcpio.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FCPIO_H_
#define _FCPIO_H_
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index b95d0063dedb..d82de34f6fd7 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_H_
#define _FNIC_H_
@@ -39,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.53"
+#define DRV_VERSION "1.6.0.54"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -89,15 +77,28 @@
#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
/*
- * Usage of the scsi_cmnd scratchpad.
+ * fnic private data per SCSI command.
* These fields are locked by the hashed io_req_lock.
*/
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-#define CMD_STATE(Cmnd) ((Cmnd)->SCp.phase)
-#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
-#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
-#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
-#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
+struct fnic_cmd_priv {
+ struct fnic_io_req *io_req;
+ enum fnic_ioreq_state state;
+ u32 flags;
+ u16 abts_status;
+ u16 lr_status;
+};
+
+static inline struct fnic_cmd_priv *fnic_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
+static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd)
+{
+ struct fnic_cmd_priv *fcmd = fnic_priv(cmd);
+
+ return ((u64)fcmd->flags << 32) | fcmd->state;
+}
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
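fnic_priv() builds on the same cmd_size mechanism: with .cmd_size = sizeof(struct fnic_cmd_priv) (added to the host template below), the SCSI core places the private struct directly behind each struct scsi_cmnd, and scsi_cmd_priv() simply returns that region. The core helper is essentially (shown for context):

    static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
    {
            return cmd + 1;	/* driver data lives right after the command */
    }

fnic_flags_and_state() reproduces the (((u64)flags << 32) | state) packing that most trace call sites previously built by hand from the scratchpad macros; it also fixes the one call site in fnic_queuecommand() that shifted the wrong way (>> 32, visible in a later hunk).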
diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c
index bbe2ca4971b2..a61e0c5e6506 100644
--- a/drivers/scsi/fnic/fnic_attrs.c
+++ b/drivers/scsi/fnic/fnic_attrs.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/string.h>
#include <linux/device.h>
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index e7326505cabb..6fedc3b7d1ab 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2012 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/errno.h>
@@ -86,8 +72,7 @@ void fnic_debugfs_terminate(void)
debugfs_remove(fnic_trace_debugfs_root);
fnic_trace_debugfs_root = NULL;
- if (fc_trc_flag)
- vfree(fc_trc_flag);
+ vfree(fc_trc_flag);
}
/*
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 1885218f9d15..79ddfaaf71a4 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
index 7761f33ab5d4..79f53029737b 100644
--- a/drivers/scsi/fnic/fnic_fip.h
+++ b/drivers/scsi/fnic/fnic_fip.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_FIP_H_
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index 1cb6a68c8e4e..f4c8769df312 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_IO_H_
#define _FNIC_IO_H_
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 2fb2731f50fb..8896758fed8c 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/string.h>
#include <linux/errno.h>
@@ -332,4 +320,3 @@ void fnic_clear_intr_mode(struct fnic *fnic)
pci_free_irq_vectors(fnic->pdev);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
-
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 44dbaa662d94..1077110ab273 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mempool.h>
@@ -124,6 +112,7 @@ static struct scsi_host_template fnic_host_template = {
.max_sectors = 0xffff,
.shost_groups = fnic_host_groups,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct fnic_cmd_priv),
};
static void
@@ -555,6 +544,39 @@ static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
+static int fnic_scsi_drv_init(struct fnic *fnic)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+
+ /* Configure maximum outstanding IO reqs */
+ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
+ host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
+ max_t(u32, FNIC_MIN_IO_REQ,
+ fnic->config.io_throttle_count));
+
+ fnic->fnic_max_tag_id = host->can_queue;
+ host->max_lun = fnic->config.luns_per_tgt;
+ host->max_id = FNIC_MAX_FCP_TARGET;
+ host->max_cmd_len = FCOE_MAX_CMD_LEN;
+
+ host->nr_hw_queues = fnic->wq_copy_count;
+ if (host->nr_hw_queues > 1)
+ shost_printk(KERN_ERR, host,
+ "fnic: blk-mq is not supported");
+
+ host->nr_hw_queues = fnic->wq_copy_count = 1;
+
+ shost_printk(KERN_INFO, host,
+ "fnic: can_queue: %d max_lun: %llu",
+ host->can_queue, host->max_lun);
+
+ shost_printk(KERN_INFO, host,
+ "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
+ host->max_id, host->max_cmd_len, host->nr_hw_queues);
+
+ return 0;
+}
+
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
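The nested min_t()/max_t() in fnic_scsi_drv_init() above is just a clamp of the UCSM-provided throttle count into the driver's supported range. An equivalent, arguably clearer form would use clamp_t() from <linux/minmax.h> (a sketch; the patch keeps the original expression, and the FNIC_UCSM_DFLT_THROTTLE_CNT_BLD guard still applies):

    host->can_queue = clamp_t(u32, fnic->config.io_throttle_count,
                              FNIC_MIN_IO_REQ, FNIC_MAX_IO_REQ);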
@@ -611,10 +633,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 64-bit first, and
- * fail to 32-bit.
+ * limitation for the device. Try 47-bit first, and
+ * fall back to 32-bit. Cisco VIC supports 47 bits only.
*/
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
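The mask setup above follows the usual try-then-fall-back pattern for DMA addressing. Condensed into a standalone helper it would read (a hypothetical sketch of a PCI probe fragment, not the literal fnic code):

    static int example_set_dma_mask(struct pci_dev *pdev)
    {
            int err;

            /* Cisco VIC DMA engines address at most 47 bits, so try that first. */
            err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
            if (err)
                    /* Constrain to 32-bit addressing if 47 bits is unavailable. */
                    err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            return err;
    }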
@@ -695,17 +717,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
- /* Configure Maximum Outstanding IO reqs*/
- if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
- host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
- max_t(u32, FNIC_MIN_IO_REQ,
- fnic->config.io_throttle_count));
- }
- fnic->fnic_max_tag_id = host->can_queue;
-
- host->max_lun = fnic->config.luns_per_tgt;
- host->max_id = FNIC_MAX_FCP_TARGET;
- host->max_cmd_len = FCOE_MAX_CMD_LEN;
+ fnic_scsi_drv_init(fnic);
fnic_get_res_counts(fnic);
@@ -1145,10 +1157,8 @@ static void __exit fnic_cleanup_module(void)
{
pci_unregister_driver(&fnic_driver);
destroy_workqueue(fnic_event_queue);
- if (fnic_fip_queue) {
- flush_workqueue(fnic_fip_queue);
+ if (fnic_fip_queue)
destroy_workqueue(fnic_fip_queue);
- }
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
@@ -1160,4 +1170,3 @@ static void __exit fnic_cleanup_module(void)
module_init(fnic_init_module);
module_exit(fnic_cleanup_module);
-
diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c
index 50488f8e169d..a1c9cfcace7f 100644
--- a/drivers/scsi/fnic/fnic_res.c
+++ b/drivers/scsi/fnic/fnic_res.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/fnic/fnic_res.h b/drivers/scsi/fnic/fnic_res.h
index ef8aaf2156dd..92a2fcfd3ea9 100644
--- a/drivers/scsi/fnic/fnic_res.h
+++ b/drivers/scsi/fnic/fnic_res.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _FNIC_RES_H_
#define _FNIC_RES_H_
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 88c549f257db..26dbd347156e 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/mempool.h>
#include <linux/errno.h>
@@ -497,8 +485,8 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
* caller disabling them.
*/
spin_unlock(lp->host->host_lock);
- CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
- CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+ fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
+ fnic_priv(sc)->flags = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
@@ -513,7 +501,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc));
+ tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -558,9 +546,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
- CMD_SP(sc) = (char *)io_req;
- CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
+ fnic_priv(sc)->io_req = io_req;
+ fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
/* create copy wq desc and enqueue it */
wq = &fnic->wq_copy[0];
@@ -571,11 +559,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
* refetch the pointer under the lock.
*/
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, 0, 0, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- CMD_SP(sc) = NULL;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ tag, sc, 0, 0, 0, fnic_flags_and_state(sc));
+ io_req = fnic_priv(sc)->io_req;
+ fnic_priv(sc)->io_req = NULL;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
fnic_release_ioreq_buf(fnic, io_req, sc);
@@ -594,7 +581,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
atomic64_read(&fnic_stats->io_stats.active_ios));
/* REVISIT: Use per IO lock in the final code */
- CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
}
out:
cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
@@ -603,8 +590,8 @@ out:
sc->cmnd[5]);
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
- tag, sc, io_req, sg_count, cmd_trace,
- (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+ tag, sc, io_req, sg_count, cmd_trace,
+ fnic_flags_and_state(sc));
/* if only we issued IO, will we have the io lock */
if (io_lock_acquired)
@@ -867,11 +854,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
- CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl io_req is null - "
@@ -888,17 +875,17 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
* if SCSI-ML has already issued abort on this command,
* set completion of the IO. The abts path will clean it up
*/
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
/*
* set the FNIC_IO_DONE so that this doesn't get
* flagged as 'out of order' if it was not aborted
*/
- CMD_FLAGS(sc) |= FNIC_IO_DONE;
- CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_IO_DONE;
+ fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
spin_unlock_irqrestore(io_lock, flags);
if(FCPIO_ABORTED == hdr_status)
- CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
@@ -912,7 +899,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
}
/* Mark the IO as complete */
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
icmnd_cmpl = &desc->u.icmnd_cmpl;
@@ -983,10 +970,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
}
/* Break link with the SCSI command */
- CMD_SP(sc) = NULL;
- CMD_FLAGS(sc) |= FNIC_IO_DONE;
-
- spin_unlock_irqrestore(io_lock, flags);
+ fnic_priv(sc)->io_req = NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_DONE;
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
@@ -996,8 +981,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
- mempool_free(io_req, fnic->io_req_pool);
-
cmd_trace = ((u64)hdr_status << 56) |
(u64)icmnd_cmpl->scsi_status << 48 |
(u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
@@ -1009,8 +992,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
((u64)icmnd_cmpl->_resvd0[1] << 56 |
(u64)icmnd_cmpl->_resvd0[0] << 48 |
jiffies_to_msecs(jiffies - start_time)),
- desc, cmd_trace,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, cmd_trace, fnic_flags_and_state(sc));
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
@@ -1021,6 +1003,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
} else
fnic->lport->host_stats.fcp_control_requests++;
+ /* Call SCSI completion function to complete the IO */
+ scsi_done(sc);
+ spin_unlock_irqrestore(io_lock, flags);
+
+ mempool_free(io_req, fnic->io_req_pool);
+
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1049,9 +1037,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
if(io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
}
-
- /* Call SCSI completion function to complete the IO */
- scsi_done(sc);
}
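The two hunks above reorder icmnd completion: scsi_done() now runs while the hashed io_req_lock is still held and the io_req has been detached, and mempool_free() moves after the unlock. Condensed, the resulting order is roughly (a sketch, not the literal code):

    spin_lock_irqsave(io_lock, flags);
    fnic_priv(sc)->io_req = NULL;            /* break the sc <-> io_req link */
    fnic_priv(sc)->flags |= FNIC_IO_DONE;
    scsi_done(sc);                           /* complete before dropping the lock */
    spin_unlock_irqrestore(io_lock, flags);
    mempool_free(io_req, fnic->io_req_pool); /* io_req is unreachable by now */

Completing the command under the lock presumably closes the window in which the abort path, serialized on the same lock, could complete the same command a second time.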
/* fnic_fcpio_itmf_cmpl_handler
@@ -1095,12 +1080,12 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
@@ -1115,9 +1100,9 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset abts cmpl recd. id %x status %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
- CMD_ABTS_STATUS(sc) = hdr_status;
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
+ fnic_priv(sc)->abts_status = hdr_status;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
if (io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
@@ -1127,7 +1112,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
case FCPIO_SUCCESS:
break;
case FCPIO_TIMEOUT:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_fw_timeouts);
else
atomic64_inc(
@@ -1139,34 +1124,34 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
(int)(id & FNIC_TAG_MASK));
break;
case FCPIO_IO_NOT_FOUND:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_io_not_found);
else
atomic64_inc(
&term_stats->terminate_io_not_found);
break;
default:
- if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
+ if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_failures);
else
atomic64_inc(
&term_stats->terminate_failures);
break;
}
- if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
spin_unlock_irqrestore(io_lock, flags);
return;
}
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
- CMD_ABTS_STATUS(sc) = hdr_status;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
+ fnic_priv(sc)->abts_status = hdr_status;
/* If the status is IO not found consider it as success */
if (hdr_status == FCPIO_IO_NOT_FOUND)
- CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
+ fnic_priv(sc)->abts_status = FCPIO_SUCCESS;
- if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
+ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -1185,7 +1170,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
} else {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl, completing IO\n");
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
sc->result = (DID_ERROR << 16);
spin_unlock_irqrestore(io_lock, flags);
@@ -1202,8 +1187,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) |
- CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
scsi_done(sc);
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
@@ -1213,15 +1197,14 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
- CMD_LR_STATUS(sc) = hdr_status;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ fnic_priv(sc)->lr_status = hdr_status;
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
- desc, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, 0, fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Terminate pending "
"dev reset cmpl recd. id %d status %s\n",
@@ -1229,14 +1212,13 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_fcpio_status_to_str(hdr_status));
return;
}
- if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
/* Need to wait for terminate completion */
spin_unlock_irqrestore(io_lock, flags);
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
- desc, 0,
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ desc, 0, fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd after time out. "
"id %d status %s\n",
@@ -1244,8 +1226,8 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_fcpio_status_to_str(hdr_status));
return;
}
- CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -1257,7 +1239,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
} else {
shost_printk(KERN_ERR, fnic->lport->host,
"Unexpected itmf io state %s tag %x\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
spin_unlock_irqrestore(io_lock, flags);
}
@@ -1356,8 +1338,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
return wq_work_done;
}
-static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
{
const int tag = scsi_cmd_to_rq(sc)->tag;
struct fnic *fnic = data;
@@ -1370,21 +1351,21 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
io_lock = fnic_io_lock_tag(fnic, tag);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ io_req = fnic_priv(sc)->io_req;
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
/*
* We will be here only when FW completes reset
* without sending completions for outstanding ios.
*/
- CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
if (io_req && io_req->dr_done)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
return true;
- } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
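The dropped bool reserved parameter here (and in fnic_rport_abort_io_iter(), fnic_pending_aborts_iter() and fnic_abts_pending_iter() below) tracks a contemporaneous change to the scsi_host_busy_iter() callback type, which no longer carries the flag. The call sites keep their shape (a sketch of the API as used by this series):

    /* Callback type: return false to stop the iteration early. */
    bool (*fn)(struct scsi_cmnd *sc, void *priv);

    /* e.g. walk every busy command on the host during cleanup: */
    scsi_host_busy_iter(fnic->lport->host, fnic_cleanup_io_iter, fnic);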
@@ -1393,7 +1374,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data,
goto cleanup_scsi_cmd;
}
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -1417,7 +1398,7 @@ cleanup_scsi_cmd:
atomic64_inc(&fnic_stats->io_stats.io_completions);
/* Complete the command to SCSI */
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
+ if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
tag, sc);
@@ -1429,7 +1410,7 @@ cleanup_scsi_cmd:
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
scsi_done(sc);
@@ -1468,7 +1449,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
spin_lock_irqsave(io_lock, flags);
/* Get the IO context which this desc refers to */
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
/* fnic interrupts are turned off by now */
@@ -1477,7 +1458,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
goto wq_copy_cleanup_scsi_cmd;
}
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -1496,7 +1477,7 @@ wq_copy_cleanup_scsi_cmd:
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
scsi_done(sc);
}
@@ -1554,8 +1535,7 @@ struct fnic_rport_abort_io_iter_data {
int term_cnt;
};
-static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_rport_abort_io_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
@@ -1571,15 +1551,15 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req || io_req->port_id != iter_data->port_id) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
sc);
@@ -1591,7 +1571,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
* Found IO that is still pending with firmware and
* belongs to rport that went away
*/
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
@@ -1599,20 +1579,20 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_rport_exch_reset: io_req->abts_done is set "
"state is %s\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
}
- if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
shost_printk(KERN_ERR, fnic->lport->host,
"rport_exch_reset "
"IO not yet issued %p tag 0x%x flags "
"%x state %d\n",
- sc, abt_tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
}
- old_ioreq_state = CMD_STATE(sc);
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ old_ioreq_state = fnic_priv(sc)->state;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag |= FNIC_TAG_DEV_RST;
}
@@ -1638,15 +1618,15 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data,
* lun reset
*/
spin_lock_irqsave(io_lock, flags);
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
} else {
spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
else
- CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
@@ -1754,9 +1734,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
- rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+ rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags);
- CMD_FLAGS(sc) = FNIC_NO_FLAGS;
+ fnic_priv(sc)->flags = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
@@ -1773,11 +1753,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
* happened, the completion wont actually complete the command
* and it will be considered as an aborted command
*
- * The CMD_SP will not be cleared except while holding io_req_lock.
+ * .io_req will not be cleared except while holding io_req_lock.
*/
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
@@ -1785,7 +1765,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req->abts_done = &tm_done;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
goto wait_pending;
}
@@ -1814,9 +1794,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
* the completion wont be done till mid-layer, since abort
* has already started.
*/
- old_ioreq_state = CMD_STATE(sc);
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ old_ioreq_state = fnic_priv(sc)->state;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
@@ -1838,9 +1818,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
io_req)) {
spin_lock_irqsave(io_lock, flags);
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -1848,10 +1828,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
goto fnic_abort_cmd_end;
}
if (task_req == FCPIO_ITMF_ABT_TASK) {
- CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
atomic64_inc(&fnic_stats->abts_stats.aborts);
} else {
- CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
atomic64_inc(&fnic_stats->term_stats.terminates);
}
@@ -1869,32 +1849,32 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* Check the abort status */
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
io_req->abts_done = NULL;
/* fw did not complete abort, timed out */
- if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+ if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
if (task_req == FCPIO_ITMF_ABT_TASK) {
atomic64_inc(&abts_stats->abort_drv_timeouts);
} else {
atomic64_inc(&term_stats->terminate_drv_timeouts);
}
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
/* IO out of order */
- if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+ if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issuing Host reset due to out of order IO\n");
@@ -1903,7 +1883,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
goto fnic_abort_cmd_end;
}
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
start_time = io_req->start_time;
/*
@@ -1911,9 +1891,9 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
* free the io_req if successful. If abort fails,
* Device reset will clean the I/O.
*/
- if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
- CMD_SP(sc) = NULL;
- else {
+ if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) {
+ fnic_priv(sc)->io_req = NULL;
+ } else {
ret = FAILED;
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
@@ -1939,7 +1919,7 @@ fnic_abort_cmd_end:
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from abort cmd type %x %s\n", task_req,
@@ -2009,8 +1989,7 @@ struct fnic_pending_aborts_iter_data {
int ret;
};
-static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
- void *data, bool reserved)
+static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
@@ -2025,12 +2004,10 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
if (sc == iter_data->lr_sc || sc->device != lun_dev)
return true;
- if (reserved)
- return true;
io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
return true;
@@ -2042,14 +2019,14 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
*/
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Found IO in %s on lun\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
- if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
- (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
+ (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s dev rst not pending sc 0x%p\n", __func__,
sc);
@@ -2060,8 +2037,8 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
if (io_req->abts_done)
shost_printk(KERN_ERR, fnic->lport->host,
"%s: io_req->abts_done is set state is %s\n",
- __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
- old_ioreq_state = CMD_STATE(sc);
+ __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ old_ioreq_state = fnic_priv(sc)->state;
/*
* Any pending IO issued prior to reset is expected to be
* in abts pending state, if not we need to set
@@ -2069,17 +2046,17 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
* When IO is completed, the IO will be handed over and
* handled in this function.
*/
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
BUG_ON(io_req->abts_done);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
abt_tag |= FNIC_TAG_DEV_RST;
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s: dev rst sc 0x%p\n", __func__, sc);
}
- CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);
@@ -2090,48 +2067,48 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
- if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
- CMD_STATE(sc) = old_ioreq_state;
+ if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
+ fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
iter_data->ret = FAILED;
return false;
} else {
spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
}
- CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
wait_for_completion_timeout(&tm_done, msecs_to_jiffies
(fnic->config.ed_tov));
/* Recheck cmd state to check if it is now aborted */
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
return true;
}
io_req->abts_done = NULL;
/* if abort is still pending with fw, fail */
- if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
+ if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+ fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
iter_data->ret = FAILED;
return false;
}
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
/* original sc used for lr is handled by dev reset code */
if (sc != iter_data->lr_sc)
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
/* original sc used for lr is handled by dev reset code */
@@ -2272,7 +2249,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
goto fnic_device_reset_end;
}
- CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
/* Allocate tag if not present */
if (unlikely(tag < 0)) {
@@ -2288,7 +2265,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
/*
* If there is a io_req attached to this command, then use it,
@@ -2302,11 +2279,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
memset(io_req, 0, sizeof(*io_req));
io_req->port_id = rport->port_id;
- CMD_SP(sc) = (char *)io_req;
+ fnic_priv(sc)->io_req = io_req;
}
io_req->dr_done = &tm_done;
- CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
- CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
+ fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
+ fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
@@ -2317,13 +2294,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
spin_lock_irqsave(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
/*
@@ -2334,7 +2311,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2343,7 +2320,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
io_req->dr_done = NULL;
- status = CMD_LR_STATUS(sc);
+ status = fnic_priv(sc)->lr_status;
/*
* If lun reset not completed, bail out with failed. io_req
@@ -2353,7 +2330,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
atomic64_inc(&reset_stats->device_reset_timeouts);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
spin_unlock_irqrestore(io_lock, flags);
int_to_scsilun(sc->device->lun, &fc_lun);
/*
@@ -2362,7 +2339,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
while (1) {
spin_lock_irqsave(io_lock, flags);
- if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
spin_unlock_irqrestore(io_lock, flags);
break;
}
@@ -2375,8 +2352,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
} else {
spin_lock_irqsave(io_lock, flags);
- CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
- CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
+ fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
@@ -2387,13 +2364,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
while (1) {
spin_lock_irqsave(io_lock, flags);
- if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
spin_unlock_irqrestore(io_lock, flags);
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
break;
} else {
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
io_req->abts_done = NULL;
goto fnic_device_reset_clean;
}
@@ -2408,7 +2385,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Device reset completed - failed\n");
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
goto fnic_device_reset_clean;
}
@@ -2421,7 +2398,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
*/
if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset failed"
" since could not abort all IOs\n");
@@ -2430,14 +2407,14 @@ int fnic_device_reset(struct scsi_cmnd *sc)
/* Clean lun reset command */
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (io_req)
/* Completed, and successful */
ret = SUCCESS;
fnic_device_reset_clean:
if (io_req)
- CMD_SP(sc) = NULL;
+ fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -2453,7 +2430,7 @@ fnic_device_reset_end:
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
- (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ fnic_flags_and_state(sc));
/* free tag if it is allocated */
if (unlikely(tag_gen_flag))
@@ -2676,8 +2653,7 @@ call_fc_exch_mgr_reset:
}
-static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
- bool reserved)
+static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
@@ -2698,7 +2674,7 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
- io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
return true;
@@ -2710,8 +2686,8 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data,
*/
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"Found IO in %s on lun\n",
- fnic_ioreq_state_to_str(CMD_STATE(sc)));
- cmd_state = CMD_STATE(sc);
+ fnic_ioreq_state_to_str(fnic_priv(sc)->state));
+ cmd_state = fnic_priv(sc)->state;
spin_unlock_irqrestore(io_lock, flags);
if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
iter_data->ret = 1;
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 086f729f3c46..bdf639eef8cf 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4a7536bb0ab3..e03967463561 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2012 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index 8aa55c1e2025..d1c301bf3fde 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2012 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2012 Cisco Systems, Inc. All rights reserved. */
#ifndef __FNIC_TRACE_H__
#define __FNIC_TRACE_H__
diff --git a/drivers/scsi/fnic/rq_enet_desc.h b/drivers/scsi/fnic/rq_enet_desc.h
index 92e80ae6b725..9bc509d355c4 100644
--- a/drivers/scsi/fnic/rq_enet_desc.h
+++ b/drivers/scsi/fnic/rq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _RQ_ENET_DESC_H_
#define _RQ_ENET_DESC_H_
diff --git a/drivers/scsi/fnic/vnic_cq.c b/drivers/scsi/fnic/vnic_cq.c
index c5db32eda5ef..ed3dd443fe3e 100644
--- a/drivers/scsi/fnic/vnic_cq.c
+++ b/drivers/scsi/fnic/vnic_cq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/fnic/vnic_cq.h b/drivers/scsi/fnic/vnic_cq.h
index 4ede6809fb1e..e7cc1f165390 100644
--- a/drivers/scsi/fnic/vnic_cq.h
+++ b/drivers/scsi/fnic/vnic_cq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
diff --git a/drivers/scsi/fnic/vnic_cq_copy.h b/drivers/scsi/fnic/vnic_cq_copy.h
index 7901ce255a81..1b198ee59dd6 100644
--- a/drivers/scsi/fnic/vnic_cq_copy.h
+++ b/drivers/scsi/fnic/vnic_cq_copy.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_CQ_COPY_H_
#define _VNIC_CQ_COPY_H_
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 5988c300cc82..3e5b437c0492 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h
index ef5309a5df5d..7a568d141cde 100644
--- a/drivers/scsi/fnic/vnic_dev.h
+++ b/drivers/scsi/fnic/vnic_dev.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h
index c20d30e36dfc..f876d223b2b4 100644
--- a/drivers/scsi/fnic/vnic_devcmd.h
+++ b/drivers/scsi/fnic/vnic_devcmd.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
diff --git a/drivers/scsi/fnic/vnic_intr.c b/drivers/scsi/fnic/vnic_intr.c
index 4f4dc8793d23..df7f63acd879 100644
--- a/drivers/scsi/fnic/vnic_intr.c
+++ b/drivers/scsi/fnic/vnic_intr.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/kernel.h>
diff --git a/drivers/scsi/fnic/vnic_intr.h b/drivers/scsi/fnic/vnic_intr.h
index d5fb40e7c98e..acc194c0f522 100644
--- a/drivers/scsi/fnic/vnic_intr.h
+++ b/drivers/scsi/fnic/vnic_intr.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
diff --git a/drivers/scsi/fnic/vnic_nic.h b/drivers/scsi/fnic/vnic_nic.h
index f15b83eeaced..6896f16d564b 100644
--- a/drivers/scsi/fnic/vnic_nic.h
+++ b/drivers/scsi/fnic/vnic_nic.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_NIC_H_
#define _VNIC_NIC_H_
diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h
index 7c6163f73bd3..3d260b831fc5 100644
--- a/drivers/scsi/fnic/vnic_resource.h
+++ b/drivers/scsi/fnic/vnic_resource.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c
index 6a35b1be0032..350607d13c9a 100644
--- a/drivers/scsi/fnic/vnic_rq.c
+++ b/drivers/scsi/fnic/vnic_rq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
@@ -191,4 +179,3 @@ void vnic_rq_clean(struct vnic_rq *rq,
vnic_dev_clear_desc_ring(&rq->ring);
}
-
diff --git a/drivers/scsi/fnic/vnic_rq.h b/drivers/scsi/fnic/vnic_rq.h
index aebdfbd6ad3c..1066255de808 100644
--- a/drivers/scsi/fnic/vnic_rq.h
+++ b/drivers/scsi/fnic/vnic_rq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h
index e343e1d0f801..4e12f7b32d9d 100644
--- a/drivers/scsi/fnic/vnic_scsi.h
+++ b/drivers/scsi/fnic/vnic_scsi.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_SCSI_H_
#define _VNIC_SCSI_H_
diff --git a/drivers/scsi/fnic/vnic_stats.h b/drivers/scsi/fnic/vnic_stats.h
index 5372e23c1cb3..4396397b089d 100644
--- a/drivers/scsi/fnic/vnic_stats.h
+++ b/drivers/scsi/fnic/vnic_stats.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c
index 442972c04e65..426b901c80f3 100644
--- a/drivers/scsi/fnic/vnic_wq.c
+++ b/drivers/scsi/fnic/vnic_wq.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h
index 5d1e0a44d94a..041618e13ce2 100644
--- a/drivers/scsi/fnic/vnic_wq.h
+++ b/drivers/scsi/fnic/vnic_wq.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
diff --git a/drivers/scsi/fnic/vnic_wq_copy.c b/drivers/scsi/fnic/vnic_wq_copy.c
index 7b18635df7e6..96569d4ccc58 100644
--- a/drivers/scsi/fnic/vnic_wq_copy.c
+++ b/drivers/scsi/fnic/vnic_wq_copy.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#include <linux/errno.h>
@@ -108,4 +96,3 @@ void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
}
-
diff --git a/drivers/scsi/fnic/vnic_wq_copy.h b/drivers/scsi/fnic/vnic_wq_copy.h
index 6aff9740c3df..2f8340144e79 100644
--- a/drivers/scsi/fnic/vnic_wq_copy.h
+++ b/drivers/scsi/fnic/vnic_wq_copy.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_
diff --git a/drivers/scsi/fnic/wq_enet_desc.h b/drivers/scsi/fnic/wq_enet_desc.h
index b121cbad18b8..9a933a5dee79 100644
--- a/drivers/scsi/fnic/wq_enet_desc.h
+++ b/drivers/scsi/fnic/wq_enet_desc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
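
All of the fnic hunks above make the same mechanical conversion: the fourteen-line GPL boilerplate is dropped in favour of a one-line SPDX tag, keeping only the copyright notices. The tag placement follows the kernel's license-rules convention: a C++-style comment in .c files, a C-style comment in headers. For reference, a minimal sketch of the two resulting shapes (file contents illustrative):

/* first line of a .c file: */
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2008 Cisco Systems, Inc. All rights reserved.

/* first line of a .h file: */
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2008 Cisco Systems, Inc. All rights reserved. */
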
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 7ba3c9312731..0c768e7d06b9 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -663,7 +663,7 @@ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata,
static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int transfersize = cmd->SCp.this_residual;
+ int transfersize = NCR5380_to_ncmd(cmd)->this_residual;
if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
return 0;
@@ -675,7 +675,7 @@ static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
/* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */
if (hostdata->board == BOARD_DTC3181E &&
cmd->sc_data_direction == DMA_TO_DEVICE)
- transfersize = min(cmd->SCp.this_residual, 512);
+ transfersize = min(transfersize, 512);
return min(transfersize, DMA_MAX_SIZE);
}
@@ -702,7 +702,7 @@ static struct scsi_host_template driver_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
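
The g_NCR5380 hunks above replace the legacy cmd->SCp scratch area with per-command private data: the host template's cmd_size field tells the SCSI midlayer how much extra space to allocate behind each scsi_cmnd, and NCR5380_to_ncmd() resolves to that area. A minimal sketch of the underlying mechanism, with hypothetical names (struct my_cmd, my_queuecommand):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_cmd {
	char *ptr;		/* current data pointer */
	int this_residual;	/* bytes left in this segment */
};

static struct scsi_host_template my_template = {
	/* midlayer allocates this much extra space per scsi_cmnd */
	.cmd_size = sizeof(struct my_cmd),
	/* ... */
};

static int my_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	/* the private area lives directly behind the scsi_cmnd */
	struct my_cmd *mcmd = scsi_cmd_priv(cmd);

	mcmd->this_residual = scsi_bufflen(cmd);
	/* ... */
	return 0;
}
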
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 727f8c8f30b5..7d56a236a011 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -12,7 +12,11 @@
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "gvp11.h"
@@ -22,8 +26,12 @@
struct gvp11_hostdata {
struct WD33C93_hostdata wh;
struct gvp11_scsiregs *regs;
+ struct device *dev;
};
+#define DMA_DIR(d) ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define TO_DMA_MASK(m) (~((unsigned long long)m & 0xffffffff))
+
static irqreturn_t gvp11_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
@@ -49,18 +57,35 @@ void gvp11_setup(char *str, int *ints)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
struct Scsi_Host *instance = cmd->device->host;
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
unsigned short cntr = GVP11_DMAC_INT_ENABLE;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ dma_addr_t addr;
int bank_mask;
static int scsi_alloc_out_of_range = 0;
+ addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+ len, DMA_DIR(dir_in));
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+ scsi_pointer->ptr);
+ return 1;
+ }
+ scsi_pointer->dma_handle = addr;
+
/* use bounce buffer if the physical address is bad */
if (addr & wh->dma_xfer_mask) {
- wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
+ scsi_pointer->dma_handle = (dma_addr_t) NULL;
+
+ wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
if (!scsi_alloc_out_of_range) {
wh->dma_bounce_buffer =
@@ -82,10 +107,32 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
- /* check if the address of the bounce buffer is OK */
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
+ /* will flush/invalidate cache for us */
+ addr = dma_map_single(hdata->dev,
+ wh->dma_bounce_buffer,
+ wh->dma_bounce_len,
+ DMA_DIR(dir_in));
+ /* can't map buffer; use PIO */
+ if (dma_mapping_error(hdata->dev, addr)) {
+ dev_warn(hdata->dev,
+ "cannot map bounce buffer %p\n",
+ wh->dma_bounce_buffer);
+ return 1;
+ }
+ }
if (addr & wh->dma_xfer_mask) {
+ /* drop useless mapping */
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(dir_in));
/* fall back to Chip RAM if address out of range */
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
kfree(wh->dma_bounce_buffer);
@@ -103,15 +150,19 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
return 1;
}
- addr = virt_to_bus(wh->dma_bounce_buffer);
+ if (!dir_in) {
+ /* copy to bounce buffer for a write */
+ memcpy(wh->dma_bounce_buffer, scsi_pointer->ptr,
+ scsi_pointer->this_residual);
+ }
+ /* chip RAM can be mapped to phys. address directly */
+ addr = virt_to_phys(wh->dma_bounce_buffer);
+ /* no need to flush/invalidate cache */
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
+		/* finally, we have a usable mapping (the failure paths above fell back to PIO) */
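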
+ scsi_pointer->dma_handle = addr;
- if (!dir_in) {
- /* copy to bounce buffer for a write */
- memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
- cmd->SCp.this_residual);
- }
}
/* setup dma direction */
@@ -124,13 +175,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
/* setup DMA *physical* address */
regs->ACR = addr;
- if (dir_in) {
- /* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
- } else {
- /* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
- }
+ /* no more cache flush here - dma_map_single() takes care */
bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
if (bank_mask)
@@ -146,6 +191,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
@@ -155,11 +201,16 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
/* remove write bit from CONTROL bits */
regs->CNTR = GVP11_DMAC_INT_ENABLE;
+ if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
+ DMA_DIR(wh->dma_dir));
+
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir && SCpnt)
- memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
- SCpnt->SCp.this_residual);
+ memcpy(scsi_pointer->ptr, wh->dma_bounce_buffer,
+ scsi_pointer->this_residual);
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
kfree(wh->dma_bounce_buffer);
@@ -185,6 +236,7 @@ static struct scsi_host_template gvp11_scsi_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static int check_wd33c93(struct gvp11_scsiregs *regs)
@@ -280,6 +332,13 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
default_dma_xfer_mask = ent->driver_data;
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(default_dma_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(default_dma_xfer_mask));
+ return -ENODEV;
+ }
+
/*
* Rumors state that some GVP ram boards use the same product
* code as the SCSI controllers. Therefore if the board-size
@@ -320,9 +379,16 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
- if (gvp11_xfer_mask)
+ if (gvp11_xfer_mask) {
hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
- else
+ if (dma_set_mask_and_coherent(&z->dev,
+ TO_DMA_MASK(gvp11_xfer_mask))) {
+ dev_warn(&z->dev, "cannot use DMA mask %llx\n",
+ TO_DMA_MASK(gvp11_xfer_mask));
+ error = -ENODEV;
+ goto fail_check_or_alloc;
+ }
+ } else
hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
hdata->wh.no_sync = 0xff;
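
The gvp11 conversion above drops virt_to_bus() and the manual cache_clear()/cache_push() calls in favour of the streaming DMA API, which performs cache maintenance as part of the mapping. A minimal sketch of the map/check/unmap pattern being used, assuming a single contiguous buffer (dev, buf and len are placeholders):

#include <linux/dma-mapping.h>

static int map_one_buffer(struct device *dev, void *buf, size_t len,
			  enum dma_data_direction dir, dma_addr_t *handle)
{
	dma_addr_t addr;

	/* flushes/invalidates CPU caches as needed for this direction */
	addr = dma_map_single(dev, buf, len, dir);
	if (dma_mapping_error(dev, addr))
		return -EIO;	/* caller may fall back to a bounce buffer or PIO */

	*handle = addr;
	return 0;
}

/* when the transfer completes, or the mapping turns out to be unusable */
static void unmap_one_buffer(struct device *dev, dma_addr_t handle, size_t len,
			     enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, len, dir);
}
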
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2213a91923a5..9aebf4a26b13 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -8,7 +8,6 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
-#include <linux/async.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
@@ -92,7 +91,7 @@
#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)
-#define HISI_SAS_WAIT_PHYUP_TIMEOUT (20 * HZ)
+#define HISI_SAS_WAIT_PHYUP_TIMEOUT (30 * HZ)
#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)
struct hisi_hba;
@@ -154,6 +153,7 @@ enum hisi_sas_bit_err_type {
enum hisi_sas_phy_event {
HISI_PHYE_PHY_UP = 0U,
HISI_PHYE_LINK_RESET,
+ HISI_PHYE_PHY_UP_PM,
HISI_PHYES_NUM,
};
@@ -229,13 +229,6 @@ struct hisi_sas_device {
spinlock_t lock; /* For protecting slots */
};
-struct hisi_sas_tmf_task {
- int force_phy;
- int phy_id;
- u8 tmf;
- u16 tag_of_task_to_be_managed;
-};
-
struct hisi_sas_slot {
struct list_head entry;
struct list_head delivery;
@@ -254,7 +247,7 @@ struct hisi_sas_slot {
dma_addr_t cmd_hdr_dma;
struct timer_list internal_abort_timer;
bool is_internal;
- struct hisi_sas_tmf_task *tmf;
+ struct sas_tmf_task *tmf;
/* Do not reorder/change members after here */
void *buf;
dma_addr_t buf_dma;
@@ -327,8 +320,7 @@ struct hisi_sas_hw {
void (*prep_stp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
void (*prep_abort)(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort);
+ struct hisi_sas_slot *slot);
void (*phys_init)(struct hisi_hba *hisi_hba);
void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -657,6 +649,7 @@ extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
int enable);
extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
gfp_t gfp_flags);
+extern void hisi_sas_phy_bcast(struct hisi_sas_phy *phy);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
struct hisi_sas_slot *slot);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index f206c433de32..699b07abb6b0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -10,12 +10,6 @@
#define DEV_IS_GONE(dev) \
((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
-static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
- u8 *lun, struct hisi_sas_tmf_task *tmf);
-static int
-hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device,
- int abort_flag, int tag, bool rst_to_recover);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
@@ -23,6 +17,10 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);
+struct hisi_sas_internal_abort_data {
+ bool rst_ha_timeout; /* reset the HA for timeout */
+};
+
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
switch (fis->command) {
@@ -158,7 +156,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
- clear_bit(slot_idx, bitmap);
+ __clear_bit(slot_idx, bitmap);
}
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
@@ -175,7 +173,7 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
- set_bit(slot_idx, bitmap);
+ __set_bit(slot_idx, bitmap);
}
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
@@ -206,14 +204,6 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
return index;
}
-static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
-{
- int i;
-
- for (i = 0; i < hisi_hba->slot_index_count; ++i)
- hisi_sas_slot_index_clear(hisi_hba, i);
-}
-
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
@@ -229,10 +219,15 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->lldd_task = NULL;
if (!sas_protocol_ata(task->task_proto)) {
- if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ if (slot->n_elem) {
+ if (task->task_proto & SAS_PROTOCOL_SSP)
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
+ task->data_dir);
+ else
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ }
if (slot->n_elem_dif) {
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
@@ -273,36 +268,29 @@ static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
}
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_slot *slot)
{
- hisi_hba->hw->prep_abort(hisi_hba, slot,
- device_id, abort_flag, tag_to_abort);
+ hisi_hba->hw->prep_abort(hisi_hba, slot);
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
- struct sas_task *task, int n_elem,
- int n_elem_req)
+ struct sas_task *task, int n_elem)
{
struct device *dev = hisi_hba->dev;
- if (!sas_protocol_ata(task->task_proto)) {
+ if (!sas_protocol_ata(task->task_proto) && n_elem) {
if (task->num_scatter) {
- if (n_elem)
- dma_unmap_sg(dev, task->scatter,
- task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(dev, task->scatter, task->num_scatter,
+ task->data_dir);
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- if (n_elem_req)
- dma_unmap_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
}
}
}
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
- struct sas_task *task, int *n_elem,
- int *n_elem_req)
+ struct sas_task *task, int *n_elem)
{
struct device *dev = hisi_hba->dev;
int rc;
@@ -320,9 +308,9 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
goto prep_out;
}
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
- *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
- 1, DMA_TO_DEVICE);
- if (!*n_elem_req) {
+ *n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
+ 1, DMA_TO_DEVICE);
+ if (!*n_elem) {
rc = -ENOMEM;
goto prep_out;
}
@@ -344,8 +332,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
err_out_dma_unmap:
/* It would be better to call dma_unmap_sg() here, but it's messy */
- hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
- *n_elem_req);
+ hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
prep_out:
return rc;
}
@@ -403,95 +390,17 @@ err_out_dif_dma_unmap:
return rc;
}
-static int hisi_sas_task_prep(struct sas_task *task,
- struct hisi_sas_dq **dq_pointer,
- bool is_tmf, struct hisi_sas_tmf_task *tmf,
- int *pass)
+static
+void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_dq *dq,
+ struct hisi_sas_device *sas_dev)
{
- struct domain_device *device = task->dev;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_sas_port *port;
- struct hisi_sas_slot *slot;
- struct hisi_sas_cmd_hdr *cmd_hdr_base;
- struct asd_sas_port *sas_port = device->port;
- struct device *dev = hisi_hba->dev;
- int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
- int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
- struct scsi_cmnd *scmd = NULL;
- struct hisi_sas_dq *dq;
- unsigned long flags;
+ struct hisi_sas_cmd_hdr *cmd_hdr_base;
+ int dlvry_queue_slot, dlvry_queue;
+ struct sas_task *task = slot->task;
int wr_q_index;
- if (DEV_IS_GONE(sas_dev)) {
- if (sas_dev)
- dev_info(dev, "task prep: device %d not ready\n",
- sas_dev->device_id);
- else
- dev_info(dev, "task prep: device %016llx not ready\n",
- SAS_ADDR(device->sas_addr));
-
- return -ECOMM;
- }
-
- if (task->uldd_task) {
- struct ata_queued_cmd *qc;
-
- if (dev_is_sata(device)) {
- qc = task->uldd_task;
- scmd = qc->scsicmd;
- } else {
- scmd = task->uldd_task;
- }
- }
-
- if (scmd) {
- unsigned int dq_index;
- u32 blk_tag;
-
- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
- dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
- *dq_pointer = dq = &hisi_hba->dq[dq_index];
- } else {
- struct Scsi_Host *shost = hisi_hba->shost;
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- int queue = qmap->mq_map[raw_smp_processor_id()];
-
- *dq_pointer = dq = &hisi_hba->dq[queue];
- }
-
- port = to_hisi_sas_port(sas_port);
- if (port && !port->port_attached) {
- dev_info(dev, "task prep: %s port%d not attach device\n",
- (dev_is_sata(device)) ?
- "SATA/STP" : "SAS",
- device->port->id);
-
- return -ECOMM;
- }
-
- rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
- &n_elem_req);
- if (rc < 0)
- goto prep_out;
-
- if (!sas_protocol_ata(task->task_proto)) {
- rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
- if (rc < 0)
- goto err_out_dma_unmap;
- }
-
- if (hisi_hba->hw->slot_index_alloc)
- rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
- else
- rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
-
- if (rc < 0)
- goto err_out_dif_dma_unmap;
-
- slot_idx = rc;
- slot = &hisi_hba->slot_info[slot_idx];
-
spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
@@ -505,16 +414,11 @@ static int hisi_sas_task_prep(struct sas_task *task,
dlvry_queue_slot = wr_q_index;
slot->device_id = sas_dev->device_id;
- slot->n_elem = n_elem;
- slot->n_elem_dif = n_elem_dif;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->task = task;
- slot->port = port;
- slot->tmf = tmf;
- slot->is_internal = is_tmf;
+
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
@@ -531,45 +435,39 @@ static int hisi_sas_task_prep(struct sas_task *task,
break;
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_STP_ALL:
hisi_sas_task_prep_ata(hisi_hba, slot);
break;
- default:
- dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
- task->task_proto);
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ hisi_sas_task_prep_abort(hisi_hba, slot);
break;
+ default:
+ return;
}
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- ++(*pass);
+	/* Make the slot's memory writes observable before marking it ready */
+ smp_wmb();
WRITE_ONCE(slot->ready, 1);
- return 0;
-
-err_out_dif_dma_unmap:
- if (!sas_protocol_ata(task->task_proto))
- hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
-err_out_dma_unmap:
- hisi_sas_dma_unmap(hisi_hba, task, n_elem,
- n_elem_req);
-prep_out:
- dev_err(dev, "task prep: failed[%d]!\n", rc);
- return rc;
+ spin_lock(&dq->lock);
+ hisi_hba->hw->start_delivery(dq);
+ spin_unlock(&dq->lock);
}
-static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
- bool is_tmf, struct hisi_sas_tmf_task *tmf)
+static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
- u32 rc;
- u32 pass = 0;
- struct hisi_hba *hisi_hba;
- struct device *dev;
+ int n_elem = 0, n_elem_dif = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ bool internal_abort = sas_is_internal_abort(task);
+ struct scsi_cmnd *scmd = NULL;
struct hisi_sas_dq *dq = NULL;
+ struct hisi_sas_port *port;
+ struct hisi_hba *hisi_hba;
+ struct hisi_sas_slot *slot;
+ struct device *dev;
+ int rc;
if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
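
The smp_wmb() ahead of WRITE_ONCE(slot->ready, 1) in hisi_sas_task_deliver() above is the publish half of a store/load ordering pair: every store into the slot must be globally visible before the ready flag is. A minimal sketch of both halves, assuming the consumer pairs the write barrier with smp_rmb() (the helper names are hypothetical):

/* producer: publish a slot whose fields were filled in above */
static void publish_slot(struct hisi_sas_slot *slot)
{
	/* order all slot field stores before the flag store */
	smp_wmb();
	WRITE_ONCE(slot->ready, 1);
}

/* consumer: only touch the slot's fields once the flag is seen */
static bool slot_is_ready(struct hisi_sas_slot *slot)
{
	if (!READ_ONCE(slot->ready))
		return false;
	/* pairs with the smp_wmb(): order the flag load before field loads */
	smp_rmb();
	return true;
}
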
@@ -580,7 +478,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
* libsas will use dev->port, should
* not call task_done for sata
*/
- if (device->dev_type != SAS_SATA_DEV)
+ if (device->dev_type != SAS_SATA_DEV && !internal_abort)
task->task_done(task);
return -ECOMM;
}
@@ -588,25 +486,126 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
hisi_hba = dev_to_hisi_hba(device);
dev = hisi_hba->dev;
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
- if (!gfpflags_allow_blocking(gfp_flags))
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ case SAS_PROTOCOL_SMP:
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_STP_ALL:
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
+ if (!gfpflags_allow_blocking(gfp_flags))
+ return -EINVAL;
+
+ down(&hisi_hba->sem);
+ up(&hisi_hba->sem);
+ }
+
+ if (DEV_IS_GONE(sas_dev)) {
+ if (sas_dev)
+ dev_info(dev, "task prep: device %d not ready\n",
+ sas_dev->device_id);
+ else
+ dev_info(dev, "task prep: device %016llx not ready\n",
+ SAS_ADDR(device->sas_addr));
+
+ return -ECOMM;
+ }
+
+ port = to_hisi_sas_port(sas_port);
+ if (!port->port_attached) {
+ dev_info(dev, "task prep: %s port%d not attach device\n",
+ dev_is_sata(device) ? "SATA/STP" : "SAS",
+ device->port->id);
+
+ return -ECOMM;
+ }
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
+
+ if (scmd) {
+ unsigned int dq_index;
+ u32 blk_tag;
+
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
+ dq = &hisi_hba->dq[dq_index];
+ } else {
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ int queue = qmap->mq_map[raw_smp_processor_id()];
+
+ dq = &hisi_hba->dq[queue];
+ }
+ break;
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ if (!hisi_hba->hw->prep_abort)
+ return TMF_RESP_FUNC_FAILED;
+
+ if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
+ return -EIO;
+
+ hisi_hba = dev_to_hisi_hba(device);
+
+ if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
return -EINVAL;
- down(&hisi_hba->sem);
- up(&hisi_hba->sem);
+ port = to_hisi_sas_port(sas_port);
+ dq = &hisi_hba->dq[task->abort_task.qid];
+ break;
+ default:
+ dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
+ task->task_proto);
+ return -EINVAL;
}
- /* protect task_prep and start_delivery sequence */
- rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
- if (rc)
- dev_err(dev, "task exec: failed[%d]!\n", rc);
+ rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
+ if (rc < 0)
+ goto prep_out;
- if (likely(pass)) {
- spin_lock(&dq->lock);
- hisi_hba->hw->start_delivery(dq);
- spin_unlock(&dq->lock);
+ if (!sas_protocol_ata(task->task_proto)) {
+ rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
+ if (rc < 0)
+ goto err_out_dma_unmap;
}
+ if (!internal_abort && hisi_hba->hw->slot_index_alloc)
+ rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+ else
+ rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
+
+ if (rc < 0)
+ goto err_out_dif_dma_unmap;
+
+ slot = &hisi_hba->slot_info[rc];
+ slot->n_elem = n_elem;
+ slot->n_elem_dif = n_elem_dif;
+ slot->task = task;
+ slot->port = port;
+
+ slot->tmf = task->tmf;
+ slot->is_internal = !!task->tmf || internal_abort;
+
+ /* protect task_prep and start_delivery sequence */
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);
+
+ return 0;
+
+err_out_dif_dma_unmap:
+ if (!sas_protocol_ata(task->task_proto))
+ hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
+err_out_dma_unmap:
+ hisi_sas_dma_unmap(hisi_hba, task, n_elem);
+prep_out:
+ dev_err(dev, "task exec: failed[%d]!\n", rc);
return rc;
}
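
When a scsi_cmnd is available, the queue selection folded into hisi_sas_queue_command() above derives the delivery queue index from the request's block-layer tag, so the command is delivered on the hardware queue blk-mq assigned it to. A minimal sketch of that derivation, assuming the driver sizes its delivery queues to match shost->nr_hw_queues:

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

static unsigned int pick_hw_queue(struct scsi_cmnd *scmd)
{
	/* the unique tag encodes both the hw queue index and the per-queue tag */
	u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	return blk_mq_unique_tag_to_hwq(blk_tag);
}
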
@@ -619,12 +618,6 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
if (!phy->phy_attached)
return;
- if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
- !sas_phy->suspended) {
- dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
- return;
- }
-
sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
if (sas_phy->phy) {
@@ -689,25 +682,39 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
return sas_dev;
}
+static void hisi_sas_tmf_aborted(struct sas_task *task)
+{
+ struct hisi_sas_slot *slot = task->lldd_task;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+		 * Synchronize the irq to avoid freeing the task before
+		 * the IO completion path has finished using it.
+ */
+ synchronize_irq(cq->irq_no);
+ slot->task = NULL;
+ }
+}
+
#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
int rc = TMF_RESP_FUNC_COMPLETE;
struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
int retry = HISI_SAS_DISK_RECOVER_CNT;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct device *dev = hisi_hba->dev;
- struct sas_phy *local_phy;
switch (device->dev_type) {
case SAS_END_DEVICE:
int_to_scsilun(0, &lun);
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
while (retry-- > 0) {
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
- &tmf_task);
+ rc = sas_clear_task_set(device, lun.scsi_lun);
if (rc == TMF_RESP_FUNC_COMPLETE) {
hisi_sas_release_task(hisi_hba, device);
break;
@@ -719,30 +726,18 @@ static int hisi_sas_init_device(struct domain_device *device)
case SAS_SATA_PM_PORT:
case SAS_SATA_PENDING:
/*
- * send HARD RESET to clear previous affiliation of
- * STP target port
+	 * If an expander is swapped while a SATA disk is attached then
+	 * we should issue a hard reset to clear the previous affiliation
+	 * of the STP target port, see SPL (chapter 6.19.4).
+ *
+ * However we don't need to issue a hard reset here for these
+ * reasons:
+ * a. When probing the device, libsas/libata already issues a
+ * hard reset in sas_probe_sata() -> ata_sas_async_probe().
+ * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
+ * to issue a hard reset by checking the dev status (== INIT).
+ * b. When resetting the controller, this is simply unnecessary.
*/
- local_phy = sas_get_local_phy(device);
- if (!scsi_is_sas_phy_local(local_phy) &&
- !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
- unsigned long deadline = ata_deadline(jiffies, 20000);
- struct sata_device *sata_dev = &device->sata_dev;
- struct ata_host *ata_host = sata_dev->ata_host;
- struct ata_port_operations *ops = ata_host->ops;
- struct ata_port *ap = sata_dev->ap;
- struct ata_link *link;
- unsigned int classes;
-
- ata_for_each_link(link, ap, EDGE)
- rc = ops->hardreset(link, &classes,
- deadline);
- }
- sas_put_local_phy(local_phy);
- if (rc) {
- dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
- return rc;
- }
-
while (retry-- > 0) {
rc = hisi_sas_softreset_ata_disk(device);
if (!rc)
@@ -758,15 +753,19 @@ static int hisi_sas_init_device(struct domain_device *device)
int hisi_sas_slave_alloc(struct scsi_device *sdev)
{
- struct domain_device *ddev;
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ struct hisi_sas_device *sas_dev = ddev->lldd_dev;
int rc;
rc = sas_slave_alloc(sdev);
if (rc)
return rc;
- ddev = sdev_to_domain_dev(sdev);
- return hisi_sas_init_device(ddev);
+ rc = hisi_sas_init_device(ddev);
+ if (rc)
+ return rc;
+ sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+ return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);
@@ -816,7 +815,6 @@ static int hisi_sas_dev_found(struct domain_device *device)
dev_info(dev, "dev[%d:%x] found\n",
sas_dev->device_id, sas_dev->dev_type);
- sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
return 0;
err_out:
@@ -860,10 +858,11 @@ int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
-static void hisi_sas_phyup_work(struct work_struct *work)
+static void hisi_sas_phyup_work_common(struct work_struct *work,
+ enum hisi_sas_phy_event event)
{
struct hisi_sas_phy *phy =
- container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
+ container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@@ -874,6 +873,11 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}
+static void hisi_sas_phyup_work(struct work_struct *work)
+{
+ hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
+}
+
static void hisi_sas_linkreset_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
@@ -883,9 +887,21 @@ static void hisi_sas_linkreset_work(struct work_struct *work)
hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
+static void hisi_sas_phyup_pm_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct device *dev = hisi_hba->dev;
+
+ hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
+ pm_runtime_put_sync(dev);
+}
+
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+ [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
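
The hisi_sas_phye_fns[] table above maps each phy event to a work function, and the phy embeds one work_struct per event so events can be raised from atomic context without allocating. A minimal sketch of the pattern with hypothetical names (my_phy, my_notify):

#include <linux/printk.h>
#include <linux/workqueue.h>

enum my_event { EV_UP, EV_RESET, EV_NUM };

struct my_phy {
	struct work_struct works[EV_NUM];
};

static void up_work(struct work_struct *w)
{
	struct my_phy *phy = container_of(w, struct my_phy, works[EV_UP]);

	pr_info("phy %p: link up\n", phy);	/* handle link-up here */
}

static void reset_work(struct work_struct *w)
{
	struct my_phy *phy = container_of(w, struct my_phy, works[EV_RESET]);

	pr_info("phy %p: link reset\n", phy);	/* handle link reset here */
}

static const work_func_t my_fns[EV_NUM] = {
	[EV_UP]    = up_work,
	[EV_RESET] = reset_work,
};

static void my_phy_init(struct my_phy *phy)
{
	int i;

	for (i = 0; i < EV_NUM; i++)
		INIT_WORK(&phy->works[i], my_fns[i]);
}

/* returns false if that event's work was already pending */
static bool my_notify(struct my_phy *phy, enum my_event ev)
{
	return queue_work(system_wq, &phy->works[ev]);
}
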
@@ -917,10 +933,14 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
+ unsigned long flags;
dev_dbg(dev, "phy%d OOB ready\n", phy_no);
- if (phy->phy_attached)
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->phy_attached) {
+ spin_unlock_irqrestore(&phy->lock, flags);
return;
+ }
if (!timer_pending(&phy->timer)) {
if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
@@ -928,13 +948,17 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
phy->timer.expires = jiffies +
HISI_SAS_WAIT_PHYUP_TIMEOUT;
add_timer(&phy->timer);
- } else {
- dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
- phy_no, phy->wait_phyup_cnt);
- phy->wait_phyup_cnt = 0;
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return;
}
+
+ dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
+ phy_no, phy->wait_phyup_cnt);
+ phy->wait_phyup_cnt = 0;
}
+ spin_unlock_irqrestore(&phy->lock, flags);
}
+
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
@@ -1026,8 +1050,7 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1072,6 +1095,29 @@ static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
hisi_hba->hw->dereg_device(hisi_hba, device);
}
+static int
+hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
+ bool rst_ha_timeout)
+{
+ struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
+ struct domain_device *device = sas_dev->sas_device;
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+ int i, rc;
+
+ for (i = 0; i < hisi_hba->cq_nvecs; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ const struct cpumask *mask = cq->irq_mask;
+
+ if (mask && !cpumask_intersects(cpu_online_mask, mask))
+ continue;
+ rc = sas_execute_internal_abort_dev(device, i, &data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static void hisi_sas_dev_gone(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1084,8 +1130,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
down(&hisi_hba->sem);
if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0, true);
+ hisi_sas_internal_task_abort_dev(sas_dev, true);
hisi_sas_dereg_device(hisi_hba, device);
@@ -1103,11 +1148,6 @@ static void hisi_sas_dev_gone(struct domain_device *device)
up(&hisi_hba->sem);
}
-static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
-}
-
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
@@ -1156,6 +1196,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
u8 sts = phy->phy_attached;
int ret = 0;
+ down(&hisi_hba->sem);
phy->reset_completion = &completion;
switch (func) {
@@ -1189,7 +1230,8 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
goto out;
}
- if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) {
+ if (sts && !wait_for_completion_timeout(&completion,
+ HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
phy_no, func);
if (phy->in_reset)
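
The control-phy path above arms phy->reset_completion and then sleeps in wait_for_completion_timeout(), with the timeout now tied to HISI_SAS_WAIT_PHYUP_TIMEOUT (raised to 30 * HZ earlier in the series) rather than a bare 2 * HZ. A minimal, deliberately simplified sketch of that wait/complete handshake (the trigger and irq-handler names are hypothetical, and the bare global pointer elides the locking a real driver needs):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static struct completion *reset_completion;	/* set while a reset is in flight */

static int wait_for_phyup(void (*my_trigger)(void))
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret = 0;

	reset_completion = &done;
	my_trigger();				/* kick off the phy reset */

	/* returns 0 on timeout, remaining jiffies otherwise */
	if (!wait_for_completion_timeout(&done, 30 * HZ))
		ret = -ETIMEDOUT;

	reset_completion = NULL;
	return ret;
}

/* called from the phy-up interrupt handler */
static void phyup_irq(void)
{
	if (reset_completion)
		complete(reset_completion);
}
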
@@ -1199,151 +1241,10 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
out:
phy->reset_completion = NULL;
+ up(&hisi_hba->sem);
return ret;
}
-static void hisi_sas_task_done(struct sas_task *task)
-{
- del_timer_sync(&task->slow_task->timer);
- complete(&task->slow_task->completion);
-}
-
-static void hisi_sas_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
- unsigned long flags;
- bool is_completed = true;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- is_completed = false;
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- if (!is_completed)
- complete(&task->slow_task->completion);
-}
-
-#define TASK_TIMEOUT (20 * HZ)
-#define TASK_RETRY 3
-#define INTERNAL_ABORT_TIMEOUT (6 * HZ)
-static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
- void *parameter, u32 para_len,
- struct hisi_sas_tmf_task *tmf)
-{
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
- struct device *dev = hisi_hba->dev;
- struct sas_task *task;
- int res, retry;
-
- for (retry = 0; retry < TASK_RETRY; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = device;
- task->task_proto = device->tproto;
-
- if (dev_is_sata(device)) {
- task->ata_task.device_control_reg_update = 1;
- memcpy(&task->ata_task.fis, parameter, para_len);
- } else {
- memcpy(&task->ssp_task, parameter, para_len);
- }
- task->task_done = hisi_sas_task_done;
-
- task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
- add_timer(&task->slow_task->timer);
-
- res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
-
- if (res) {
- del_timer_sync(&task->slow_task->timer);
- dev_err(dev, "abort tmf: executing internal task failed: %d\n",
- res);
- goto ex_err;
- }
-
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- struct hisi_sas_slot *slot = task->lldd_task;
-
- dev_err(dev, "abort tmf: TMF task timeout and not done\n");
- if (slot) {
- struct hisi_sas_cq *cq =
- &hisi_hba->cq[slot->dlvry_queue];
- /*
- * sync irq to avoid free'ing task
- * before using task in IO completion
- */
- synchronize_irq(cq->irq_no);
- slot->task = NULL;
- }
-
- goto ex_err;
- } else
- dev_err(dev, "abort tmf: TMF task timeout\n");
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_SUCC) {
- res = TMF_RESP_FUNC_SUCC;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun
- */
- dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
- SAS_ADDR(device->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- res = task->task_status.residual;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- dev_warn(dev, "abort tmf: blocked task error\n");
- res = -EMSGSIZE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_OPEN_REJECT) {
- dev_warn(dev, "abort tmf: open reject failed\n");
- res = -EIO;
- } else {
- dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
- SAS_ADDR(device->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- }
- sas_free_task(task);
- task = NULL;
- }
-ex_err:
- if (retry == TASK_RETRY)
- dev_warn(dev, "abort tmf: executing internal task failed!\n");
- sas_free_task(task);
- return res;
-}
-
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
bool reset, int pmp, u8 *fis)
{
@@ -1366,13 +1267,12 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
int rc = TMF_RESP_FUNC_FAILED;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- int s = sizeof(struct host_to_dev_fis);
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
+ rc = sas_execute_ata_cmd(device, fis, -1);
if (rc != TMF_RESP_FUNC_COMPLETE)
break;
}
@@ -1382,8 +1282,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis,
- s, NULL);
+ rc = sas_execute_ata_cmd(device, fis, -1);
if (rc != TMF_RESP_FUNC_COMPLETE)
dev_err(dev, "ata disk %016llx de-reset failed\n",
SAS_ADDR(device->sas_addr));
@@ -1399,20 +1298,6 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
return rc;
}
-static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
- u8 *lun, struct hisi_sas_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
-
- if (!(device->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- memcpy(ssp_task.LUN, lun, 8);
-
- return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
- sizeof(ssp_task), tmf);
-}
-
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
@@ -1433,11 +1318,13 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
sas_port = device->port;
port = to_hisi_sas_port(sas_port);
+ spin_lock(&sas_port->phy_list_lock);
list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
if (state & BIT(sas_phy->id)) {
phy = sas_phy->lldd_phy;
break;
}
+ spin_unlock(&sas_port->phy_list_lock);
if (phy) {
port->id = phy->port_id;
@@ -1454,6 +1341,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct asd_sas_port *_sas_port = NULL;
int phy_no;
@@ -1482,6 +1370,12 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
}
}
+ /*
+ * Ensure any bcast events are processed before the async nexus
+ * reset calls made from hisi_sas_clear_nexus_ha() ->
+ * hisi_sas_async_I_T_nexus_reset().
+ */
+ sas_drain_work(sas_ha);
}
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
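[Editorial sketch] The sas_drain_work() call added above acts as a barrier for queued libsas events; sas_drain_work() is the API used in the hunk, the wrapper is illustrative.

static int example_rescan_then_nexus_reset(struct hisi_hba *hisi_hba)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;

	/* ... phy up/down notifications above may queue bcast events ... */

	/* Flush the libsas event queue so broadcast handling finishes
	 * before hisi_sas_clear_nexus_ha() starts async nexus resets. */
	return sas_drain_work(sas_ha);
}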
@@ -1505,31 +1399,25 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
struct asd_sas_port *sas_port,
struct domain_device *device)
{
- struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
struct ata_port *ap = device->sata_dev.ap;
struct device *dev = hisi_hba->dev;
- int s = sizeof(struct host_to_dev_fis);
int rc = TMF_RESP_FUNC_FAILED;
- struct asd_sas_phy *sas_phy;
struct ata_link *link;
u8 fis[20] = {0};
- u32 state;
+ int i;
- state = hisi_hba->hw->get_phys_state(hisi_hba);
- list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
- if (!(state & BIT(sas_phy->id)))
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ if (!(sas_port->phy_mask & BIT(i)))
continue;
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
- tmf_task.phy_id = sas_phy->id;
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
- &tmf_task);
+ rc = sas_execute_ata_cmd(device, fis, i);
if (rc != TMF_RESP_FUNC_COMPLETE) {
dev_err(dev, "phy%d ata reset failed rc=%d\n",
- sas_phy->id, rc);
+ i, rc);
break;
}
}
@@ -1548,9 +1436,7 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
continue;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0,
- false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0)
dev_err(dev, "STP reject: abort dev failed %d\n", rc);
}
@@ -1581,7 +1467,6 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
- down(&hisi_hba->sem);
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@@ -1606,9 +1491,9 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
- up(&hisi_hba->sem);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
@@ -1619,8 +1504,11 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
if (!hisi_hba->hw->soft_reset)
return -1;
- if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ down(&hisi_hba->sem);
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ up(&hisi_hba->sem);
return -1;
+ }
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
@@ -1646,9 +1534,9 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
return rc;
}
+ clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_done(hisi_hba);
- clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
dev_info(dev, "controller reset complete\n");
return 0;
@@ -1656,8 +1544,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
static int hisi_sas_abort_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
+ struct hisi_sas_internal_abort_data internal_abort_data = { false };
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba;
@@ -1692,21 +1579,13 @@ static int hisi_sas_abort_task(struct sas_task *task)
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_slot *slot = task->lldd_task;
u16 tag = slot->idx;
int rc2;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
-
- rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
- &tmf_task);
-
- rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag,
- false);
+ rc = sas_abort_task(task, tag);
+ rc2 = sas_execute_internal_abort_single(device, tag,
+ slot->dlvry_queue, &internal_abort_data);
if (rc2 < 0) {
dev_err(dev, "abort task: internal abort (%d)\n", rc2);
return TMF_RESP_FUNC_FAILED;
@@ -1726,9 +1605,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV,
- 0, false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "abort task: internal abort failed\n");
goto out;
@@ -1742,9 +1619,9 @@ static int hisi_sas_abort_task(struct sas_task *task)
u32 tag = slot->idx;
struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_CMD, tag,
- false);
+ rc = sas_execute_internal_abort_single(device,
+ tag, slot->dlvry_queue,
+ &internal_abort_data);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
task->lldd_task) {
/*
@@ -1764,41 +1641,25 @@ out:
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- struct hisi_sas_tmf_task tmf_task;
int rc;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0, false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
return TMF_RESP_FUNC_FAILED;
}
hisi_sas_dereg_device(hisi_hba, device);
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
-
+ rc = sas_abort_task_set(device, lun);
if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
return rc;
}
-static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
-{
- struct hisi_sas_tmf_task tmf_task;
- int rc;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
-
- return rc;
-}
-
-#define I_T_NEXUS_RESET_PHYUP_TIMEOUT (2 * HZ)
-
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
struct sas_phy *local_phy = sas_get_local_phy(device);
@@ -1844,13 +1705,18 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
/* report PHY down if timed out */
if (rc == -ETIMEDOUT)
hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
- } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
- /*
- * If in init state, we rely on caller to wait for link to be
- * ready; otherwise, except phy reset is fail, delay.
- */
- if (!rc)
- msleep(2000);
+ return rc;
+ }
+
+ if (rc)
+ return rc;
+
+ /* Remote phy */
+ if (dev_is_sata(device)) {
+ rc = sas_ata_wait_after_reset(device,
+ HISI_SAS_WAIT_PHYUP_TIMEOUT);
+ } else {
+ msleep(2000);
}
return rc;
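[Editorial sketch] The remote-phy settling logic introduced above, condensed; only dev_is_sata(), sas_ata_wait_after_reset() and HISI_SAS_WAIT_PHYUP_TIMEOUT come from the patch, the wrapper is illustrative. SATA devices signal recovery with a D2H signature FIS that can be waited on; plain SAS targets have no equivalent event, hence the fixed delay.

static int example_wait_after_remote_phy_reset(struct domain_device *device)
{
	if (dev_is_sata(device))
		/* Bounded wait for the device's signature FIS. */
		return sas_ata_wait_after_reset(device,
						HISI_SAS_WAIT_PHYUP_TIMEOUT);

	msleep(2000);	/* no completion event to wait on for SAS */
	return 0;
}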
@@ -1858,12 +1724,12 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int rc;
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0, false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
return TMF_RESP_FUNC_FAILED;
@@ -1911,8 +1777,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
int rc = TMF_RESP_FUNC_FAILED;
/* Clear internal IO and then lu reset */
- rc = hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0, false);
+ rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
if (rc < 0) {
dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
@@ -1930,9 +1795,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
hisi_sas_release_task(hisi_hba, device);
sas_put_local_phy(phy);
} else {
- struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
-
- rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ rc = sas_lu_reset(device, lun);
if (rc == TMF_RESP_FUNC_COMPLETE)
hisi_sas_release_task(hisi_hba, device);
}
@@ -1960,12 +1823,14 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
ASYNC_DOMAIN_EXCLUSIVE(async);
- int i;
+ int i, ret;
queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
- if (!r.done)
- return TMF_RESP_FUNC_FAILED;
+ if (!r.done) {
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
@@ -1982,28 +1847,20 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
async_synchronize_full_domain(&async);
hisi_sas_release_tasks(hisi_hba);
- return TMF_RESP_FUNC_COMPLETE;
+ ret = TMF_RESP_FUNC_COMPLETE;
+out:
+ return ret;
}
static int hisi_sas_query_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
- struct domain_device *device = task->dev;
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
-
- rc = hisi_sas_debug_issue_ssp_tmf(device,
- lun.scsi_lun,
- &tmf_task);
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
@@ -2019,237 +1876,48 @@ static int hisi_sas_query_task(struct sas_task *task)
return rc;
}
-static int
-hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
- struct sas_task *task, int abort_flag,
- int task_tag, struct hisi_sas_dq *dq)
+static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
+ void *data)
{
struct domain_device *device = task->dev;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = hisi_hba->dev;
- struct hisi_sas_port *port;
- struct hisi_sas_slot *slot;
- struct asd_sas_port *sas_port = device->port;
- struct hisi_sas_cmd_hdr *cmd_hdr_base;
- int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags;
- int wr_q_index;
-
- if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
- return -EINVAL;
-
- if (!device->port)
- return -1;
-
- port = to_hisi_sas_port(sas_port);
-
- /* simply get a slot and send abort command */
- rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
- if (rc < 0)
- goto err_out;
-
- slot_idx = rc;
- slot = &hisi_hba->slot_info[slot_idx];
-
- spin_lock(&dq->lock);
- wr_q_index = dq->wr_point;
- dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
- list_add_tail(&slot->delivery, &dq->list);
- spin_unlock(&dq->lock);
- spin_lock(&sas_dev->lock);
- list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock(&sas_dev->lock);
-
- dlvry_queue = dq->id;
- dlvry_queue_slot = wr_q_index;
-
- slot->device_id = sas_dev->device_id;
- slot->n_elem = n_elem;
- slot->dlvry_queue = dlvry_queue;
- slot->dlvry_queue_slot = dlvry_queue_slot;
- cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
- slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->task = task;
- slot->port = port;
- slot->is_internal = true;
- task->lldd_task = slot;
-
- memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
- memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
- memset(hisi_sas_status_buf_addr_mem(slot), 0,
- sizeof(struct hisi_sas_err_record));
-
- hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
- abort_flag, task_tag);
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- WRITE_ONCE(slot->ready, 1);
- /* send abort command to the chip */
- spin_lock(&dq->lock);
- hisi_hba->hw->start_delivery(dq);
- spin_unlock(&dq->lock);
-
- return 0;
-
-err_out:
- dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
-
- return rc;
-}
-
-/**
- * _hisi_sas_internal_task_abort -- execute an internal
- * abort command for single IO command or a device
- * @hisi_hba: host controller struct
- * @device: domain device
- * @abort_flag: mode of operation, device or single IO
- * @tag: tag of IO to be aborted (only relevant to single
- * IO mode)
- * @dq: delivery queue for this internal abort command
- * @rst_to_recover: If rst_to_recover set, queue a controller
- * reset if an internal abort times out.
- */
-static int
-_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device, int abort_flag,
- int tag, struct hisi_sas_dq *dq, bool rst_to_recover)
-{
- struct sas_task *task;
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = hisi_hba->dev;
- int res;
-
- /*
- * The interface is not realized means this HW don't support internal
- * abort, or don't need to do internal abort. Then here, we return
- * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
- * the internal abort has been executed and returned CQ.
- */
- if (!hisi_hba->hw->prep_abort)
- return TMF_RESP_FUNC_FAILED;
-
- if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
- return -EIO;
-
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = device;
- task->task_proto = device->tproto;
- task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = hisi_sas_tmf_timedout;
- task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
- add_timer(&task->slow_task->timer);
-
- res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
- task, abort_flag, tag, dq);
- if (res) {
- del_timer_sync(&task->slow_task->timer);
- dev_err(dev, "internal task abort: executing internal task failed: %d\n",
- res);
- goto exit;
- }
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
-
- /* Internal abort timed out */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- struct hisi_sas_slot *slot = task->lldd_task;
-
- set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
-
- if (slot) {
- struct hisi_sas_cq *cq =
- &hisi_hba->cq[slot->dlvry_queue];
- /*
- * sync irq to avoid free'ing task
- * before using task in IO completion
- */
- synchronize_irq(cq->irq_no);
- slot->task = NULL;
- }
-
- if (rst_to_recover) {
- dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n");
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- } else {
- dev_err(dev, "internal task abort: timeout and not done.\n");
- }
-
- res = -EIO;
- goto exit;
- } else
- dev_err(dev, "internal task abort: timeout.\n");
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
- res = TMF_RESP_FUNC_COMPLETE;
- goto exit;
- }
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct hisi_sas_internal_abort_data *timeout = data;
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == TMF_RESP_FUNC_SUCC) {
- res = TMF_RESP_FUNC_SUCC;
- goto exit;
- }
+ if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+ queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
-exit:
- dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
- SAS_ADDR(device->sas_addr), task,
- task->task_status.resp, /* 0 is complete, -1 is undelivered */
- task->task_status.stat);
- sas_free_task(task);
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ pr_err("Internal abort: timeout %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ } else {
+ struct hisi_sas_slot *slot = task->lldd_task;
- return res;
-}
+ set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
-static int
-hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
- struct domain_device *device,
- int abort_flag, int tag, bool rst_to_recover)
-{
- struct hisi_sas_slot *slot;
- struct device *dev = hisi_hba->dev;
- struct hisi_sas_dq *dq;
- int i, rc;
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * Synchronize the irq to avoid freeing the task before the
+ * IO completion path has finished using it.
+ */
+ synchronize_irq(cq->irq_no);
+ slot->task = NULL;
+ }
- switch (abort_flag) {
- case HISI_SAS_INT_ABT_CMD:
- slot = &hisi_hba->slot_info[tag];
- dq = &hisi_hba->dq[slot->dlvry_queue];
- return _hisi_sas_internal_task_abort(hisi_hba, device,
- abort_flag, tag, dq,
- rst_to_recover);
- case HISI_SAS_INT_ABT_DEV:
- for (i = 0; i < hisi_hba->cq_nvecs; i++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[i];
- const struct cpumask *mask = cq->irq_mask;
-
- if (mask && !cpumask_intersects(cpu_online_mask, mask))
- continue;
- dq = &hisi_hba->dq[i];
- rc = _hisi_sas_internal_task_abort(hisi_hba, device,
- abort_flag, tag,
- dq, rst_to_recover);
- if (rc)
- return rc;
+ if (timeout->rst_ha_timeout) {
+ pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
+ SAS_ADDR(device->sas_addr));
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ } else {
+ pr_err("Internal abort: timeout and not done %016llx.\n",
+ SAS_ADDR(device->sas_addr));
}
- break;
- default:
- dev_err(dev, "Unrecognised internal abort flag (%d)\n",
- abort_flag);
- return -EINVAL;
+
+ return true;
}
- return 0;
+ return false;
}
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
@@ -2325,6 +1993,22 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
+void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
+{
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct sas_ha_struct *sha = &hisi_hba->sha;
+
+ if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ return;
+
+ if (test_bit(SAS_HA_FROZEN, &sha->state))
+ return;
+
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);
+
void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
int i;
@@ -2360,13 +2044,14 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_control_phy = hisi_sas_control_phy,
.lldd_abort_task = hisi_sas_abort_task,
.lldd_abort_task_set = hisi_sas_abort_task_set,
- .lldd_clear_aca = hisi_sas_clear_aca,
.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
.lldd_lu_reset = hisi_sas_lu_reset,
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
.lldd_write_gpio = hisi_sas_write_gpio,
+ .lldd_tmf_aborted = hisi_sas_tmf_aborted,
+ .lldd_abort_timeout = hisi_sas_internal_abort_timeout,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -2516,9 +2201,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->breakpoint)
goto err_out;
- hisi_hba->slot_index_count = max_command_entries;
- s = hisi_hba->slot_index_count / BITS_PER_BYTE;
- hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
+ s = hisi_hba->slot_index_count = max_command_entries;
+ hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
if (!hisi_hba->slot_index_tags)
goto err_out;
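[Editorial sketch] devm_bitmap_zalloc() sizes the allocation by bit count, so the BITS_PER_BYTE arithmetic above goes away. A hedged sketch of the tag-allocation pattern this bitmap backs; the field names are the driver's, the helper itself (and its no-wraparound policy) is illustrative.

static int example_alloc_slot_idx(struct hisi_hba *hisi_hba)
{
	unsigned long *bitmap = hisi_hba->slot_index_tags;
	int idx;

	spin_lock(&hisi_hba->lock);
	idx = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				 hisi_hba->last_slot_index + 1);
	if (idx >= hisi_hba->slot_index_count) {
		spin_unlock(&hisi_hba->lock);
		return -ENOSPC;
	}
	set_bit(idx, bitmap);
	hisi_hba->last_slot_index = idx;
	spin_unlock(&hisi_hba->lock);

	return idx;
}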
@@ -2536,7 +2220,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->sata_breakpoint)
goto err_out;
- hisi_sas_slot_index_init(hisi_hba);
hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
@@ -2687,9 +2370,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (error)
- error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
if (error) {
dev_err(dev, "No usable DMA addressing method\n");
goto err_out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 3059d19e4368..d643c5a49aa9 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -958,7 +958,7 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd, fburst = 0;
u32 dw1, dw2;
@@ -1200,8 +1200,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -1283,8 +1282,6 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -1415,9 +1412,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
goto end;
}
- if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ hisi_sas_phy_bcast(phy);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1639,11 +1634,8 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
idx = i * HISI_SAS_PHY_INT_NR;
for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (irq < 0) {
- dev_err(dev, "irq init: fail map phy interrupt %d\n",
- idx);
+ if (irq < 0)
return irq;
- }
rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,
DRV_NAME " phy", phy);
@@ -1658,11 +1650,8 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (irq < 0) {
- dev_err(dev, "irq init: could not map cq interrupt %d\n",
- idx);
+ if (irq < 0)
return irq;
- }
rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0,
DRV_NAME " cq", &hisi_hba->cq[i]);
@@ -1676,11 +1665,8 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
irq = platform_get_irq(pdev, idx);
- if (irq < 0) {
- dev_err(dev, "irq init: could not map fatal interrupt %d\n",
- idx);
+ if (irq < 0)
return irq;
- }
rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
DRV_NAME " fatal", hisi_hba);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 64ed3e472e65..cded42f4ca44 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -805,8 +805,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
return -SAS_QUEUE_FULL;
}
/*
- * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
- */
+ * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
+ */
if (sata_dev ^ (start & 1))
break;
start++;
@@ -1742,7 +1742,7 @@ static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
u8 *buf_cmd;
u32 dw1 = 0, dw2 = 0;
@@ -2344,8 +2344,7 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
@@ -2429,8 +2428,6 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -2492,7 +2489,8 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_ata_task *ata_task = &task->ata_task;
+ struct sas_tmf_task *tmf = slot->tmf;
u8 *buf_cmd;
int has_data = 0, hdr_tag = 0;
u32 dw0, dw1 = 0, dw2 = 0;
@@ -2505,9 +2503,9 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
else
dw0 |= 4 << CMD_HDR_CMD_OFF;
- if (tmf && tmf->force_phy) {
+ if (tmf && ata_task->force_phy) {
dw0 |= CMD_HDR_FORCE_PHY_MSK;
- dw0 |= (1 << tmf->phy_id) << CMD_HDR_PHY_ID_OFF;
+ dw0 |= (1 << ata_task->force_phy_id) << CMD_HDR_PHY_ID_OFF;
}
hdr->dw0 = cpu_to_le32(dw0);
@@ -2603,14 +2601,15 @@ static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
}
static void prep_abort_v2_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
struct domain_device *dev = task->dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct hisi_sas_port *port = slot->port;
struct timer_list *timer = &slot->internal_abort_timer;
+ struct hisi_sas_device *sas_dev = dev->lldd_dev;
/* setup the quirk timer */
timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0);
@@ -2622,13 +2621,13 @@ static void prep_abort_v2_hw(struct hisi_hba *hisi_hba,
(port->id << CMD_HDR_PORT_OFF) |
(dev_is_sata(dev) <<
CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
- (abort_flag << CMD_HDR_ABORT_FLAG_OFF));
+ (abort->type << CMD_HDR_ABORT_FLAG_OFF));
/* dw1 */
- hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
+ hdr->dw1 = cpu_to_le32(sas_dev->device_id << CMD_HDR_DEV_ID_OFF);
/* dw7 */
- hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
}
@@ -2812,15 +2811,12 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ hisi_sas_phy_bcast(phy);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -3538,7 +3534,7 @@ static struct attribute *host_v2_hw_attrs[] = {
ATTRIBUTE_GROUPS(host_v2_hw);
-static int map_queues_v2_hw(struct Scsi_Host *shost)
+static void map_queues_v2_hw(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
@@ -3553,9 +3549,6 @@ static int map_queues_v2_hw(struct Scsi_Host *shost)
for_each_cpu(cpu, mask)
qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
-
- return 0;
-
}
static struct scsi_host_template sht_v2_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 0ef6c21bf081..d56b4bfd2767 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -406,6 +406,8 @@
#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
#define CMPLT_HDR_RSPNS_XFRD_OFF 10
#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
+#define CMPLT_HDR_RSPNS_GOOD_OFF 11
+#define CMPLT_HDR_RSPNS_GOOD_MSK (0x1 << CMPLT_HDR_RSPNS_GOOD_OFF)
#define CMPLT_HDR_ERX_OFF 12
#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_ABORT_STAT_OFF 13
@@ -479,6 +481,9 @@ struct hisi_sas_err_record_v3 {
#define RX_DATA_LEN_UNDERFLOW_OFF 6
#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
+#define RX_FIS_STATUS_ERR_OFF 0
+#define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF)
+
#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32
@@ -530,7 +535,7 @@ MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask;
-module_param(prot_mask, int, 0);
+module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
static void debugfs_work_handler_v3_hw(struct work_struct *work);
@@ -1219,7 +1224,7 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_port *port = slot->port;
struct sas_ssp_task *ssp_task = &task->ssp_task;
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
- struct hisi_sas_tmf_task *tmf = slot->tmf;
+ struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
unsigned char prot_op;
u8 *buf_cmd;
@@ -1450,28 +1455,28 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
}
static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
struct domain_device *dev = task->dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct hisi_sas_port *port = slot->port;
+ struct hisi_sas_device *sas_dev = dev->lldd_dev;
+ bool sata = dev_is_sata(dev);
/* dw0 */
- hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /*abort*/
+ hdr->dw0 = cpu_to_le32((5U << CMD_HDR_CMD_OFF) | /* abort */
(port->id << CMD_HDR_PORT_OFF) |
- (dev_is_sata(dev)
- << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
- (abort_flag
- << CMD_HDR_ABORT_FLAG_OFF));
+ (sata << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
+ (abort->type << CMD_HDR_ABORT_FLAG_OFF));
/* dw1 */
- hdr->dw1 = cpu_to_le32(device_id
+ hdr->dw1 = cpu_to_le32(sas_dev->device_id
<< CMD_HDR_DEV_ID_OFF);
/* dw7 */
- hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->dw7 = cpu_to_le32(abort->tag << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
}
@@ -1484,7 +1489,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
- del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
@@ -1561,9 +1565,24 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
}
phy->port_id = port_id;
- phy->phy_attached = 1;
- hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+
+ /*
+ * Call pm_runtime_get_noresume(), which pairs with
+ * hisi_sas_phyup_pm_work() -> pm_runtime_put_sync().
+ * On failure, call pm_runtime_put(), as we are in hardirq context.
+ */
+ pm_runtime_get_noresume(dev);
+ res = hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+ if (!res)
+ pm_runtime_put(dev);
+
res = IRQ_HANDLED;
+
+ spin_lock(&phy->lock);
+ /* Delete timer and set phy_attached atomically */
+ del_timer(&phy->timer);
+ phy->phy_attached = 1;
+ spin_unlock(&phy->lock);
end:
if (phy->reset_completion)
complete(phy->reset_completion);
@@ -1607,15 +1626,12 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
- if ((bcast_status & RX_BCAST_CHG_MSK) &&
- !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
- sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
- GFP_ATOMIC);
+ if (bcast_status & RX_BCAST_CHG_MSK)
+ hisi_sas_phy_bcast(phy);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -2132,7 +2148,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
return IRQ_HANDLED;
}
-static void
+static bool
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
@@ -2145,11 +2161,21 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_status_buf_addr_mem(slot);
u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
+ u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
u32 dw3 = le32_to_cpu(complete_hdr->dw3);
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ /*
+ * If the returned response frame is incorrect because of data
+ * underflow, but the I/O information has been written to the host
+ * memory, examine the response IU.
+ */
+ if (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
+ (complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))
+ return false;
+
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
@@ -2163,7 +2189,10 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
+ ts->stat = SAS_PROTO_RESPONSE;
+ } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
} else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
@@ -2181,6 +2210,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
default:
break;
}
+ return true;
}
static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
@@ -2209,8 +2239,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
sas_dev = device->lldd_dev;
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
memset(ts, 0, sizeof(*ts));
@@ -2255,19 +2284,20 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
- slot_err_v3_hw(hisi_hba, task, slot);
- if (ts->stat != SAS_DATA_UNDERRUN)
- dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
- slot->idx, task, sas_dev->device_id,
- SAS_ADDR(device->sas_addr),
- dw0, dw1, complete_hdr->act, dw3,
- error_info[0], error_info[1],
- error_info[2], error_info[3]);
- if (unlikely(slot->abort)) {
- sas_task_abort(task);
- return;
+ if (slot_err_v3_hw(hisi_hba, task, slot)) {
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task, sas_dev->device_id,
+ SAS_ADDR(device->sas_addr),
+ dw0, dw1, complete_hdr->act, dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
+ if (unlikely(slot->abort)) {
+ sas_task_abort(task);
+ return;
+ }
+ goto out;
}
- goto out;
}
switch (task->task_proto) {
@@ -2285,8 +2315,6 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_SAM_STAT_GOOD;
- dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
- DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
@@ -2390,17 +2418,25 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
return IRQ_WAKE_THREAD;
}
+static void hisi_sas_v3_free_vectors(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ pci_free_irq_vectors(pdev);
+}
+
static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
{
int vectors;
int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
struct Scsi_Host *shost = hisi_hba->shost;
+ struct pci_dev *pdev = hisi_hba->pci_dev;
struct irq_affinity desc = {
.pre_vectors = BASE_VECTORS_V3_HW,
};
min_msi = MIN_AFFINE_VECTORS_V3_HW;
- vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
+ vectors = pci_alloc_irq_vectors_affinity(pdev,
min_msi, max_msi,
PCI_IRQ_MSI |
PCI_IRQ_AFFINITY,
@@ -2412,6 +2448,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
shost->nr_hw_queues = hisi_hba->cq_nvecs;
+ devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
return 0;
}
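[Editorial sketch] With the devm action registered above, later error paths and the remove path can drop their explicit pci_free_irq_vectors() calls. The same pattern with devm_add_action_or_reset(), the stricter variant that runs the callback immediately if registration fails; the surrounding helper is illustrative.

static void example_free_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

static int example_alloc_vectors(struct pci_dev *pdev, int min, int max)
{
	int ret, vectors;

	vectors = pci_alloc_irq_vectors(pdev, min, max, PCI_IRQ_MSI);
	if (vectors < 0)
		return vectors;

	/* Cleanup now unwinds automatically with the device. */
	ret = devm_add_action_or_reset(&pdev->dev, example_free_vectors,
				       pdev);
	if (ret)
		return ret;

	return vectors;
}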
@@ -2743,15 +2780,12 @@ static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
static int slave_configure_v3_hw(struct scsi_device *sdev)
{
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
- struct domain_device *ddev = sdev_to_domain_dev(sdev);
struct hisi_hba *hisi_hba = shost_priv(shost);
+ int ret = hisi_sas_slave_configure(sdev);
struct device *dev = hisi_hba->dev;
- int ret = sas_slave_configure(sdev);
if (ret)
return ret;
- if (!dev_is_sata(ddev))
- sas_change_queue_depth(sdev, 64);
if (sdev->type == TYPE_ENCLOSURE)
return 0;
@@ -3127,13 +3161,12 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
return 0;
}
-static int hisi_sas_map_queues(struct Scsi_Host *shost)
+static void hisi_sas_map_queues(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
- BASE_VECTORS_V3_HW);
+ blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW);
}
static struct scsi_host_template sht_v3_hw = {
@@ -3959,6 +3992,54 @@ static const struct file_operations debugfs_bist_phy_v3_hw_fops = {
.owner = THIS_MODULE,
};
+static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct hisi_hba *hisi_hba = m->private;
+ unsigned int cnt;
+ int val;
+
+ if (hisi_hba->debugfs_bist_enable)
+ return -EPERM;
+
+ val = kstrtouint_from_user(buf, count, 0, &cnt);
+ if (val)
+ return val;
+
+ if (cnt)
+ return -EINVAL;
+
+ hisi_hba->debugfs_bist_cnt = 0;
+ return count;
+}
+
+static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_hba *hisi_hba = s->private;
+
+ seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt);
+
+ return 0;
+}
+
+static int debugfs_bist_cnt_v3_hw_open(struct inode *inode,
+ struct file *filp)
+{
+ return single_open(filp, debugfs_bist_cnt_v3_hw_show,
+ inode->i_private);
+}
+
+static const struct file_operations debugfs_bist_cnt_v3_hw_ops = {
+ .open = debugfs_bist_cnt_v3_hw_open,
+ .read = seq_read,
+ .write = debugfs_bist_cnt_v3_hw_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static const struct {
int value;
char *name;
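[Editorial sketch] The plain debugfs_create_u32() file accepted any value at any time; the custom fops above only accept a write of "0", and only while BIST is disabled. A hypothetical userspace snippet exercising the new file; the path is illustrative.

#include <fcntl.h>
#include <unistd.h>

/* Returns 0 if the BIST count was cleared, -1 otherwise (e.g. the
 * write is rejected with EPERM while BIST is enabled). */
int clear_bist_cnt(const char *path /* e.g. <debugfs>/.../bist/cnt */)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "0", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}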
@@ -4596,8 +4677,8 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
hisi_hba, &debugfs_bist_phy_v3_hw_fops);
- debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
- &hisi_hba->debugfs_bist_cnt);
+ debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry,
+ hisi_hba, &debugfs_bist_cnt_v3_hw_ops);
debugfs_create_file("loopback_mode", 0600,
hisi_hba->debugfs_bist_dentry,
@@ -4687,8 +4768,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
rc = -ENODEV;
@@ -4709,7 +4788,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hisi_hba->regs) {
dev_err(dev, "cannot map register\n");
rc = -ENOMEM;
- goto err_out_ha;
+ goto err_out_free_host;
}
phy_nr = port_nr = hisi_hba->n_phy;
@@ -4718,7 +4797,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
if (!arr_phy || !arr_port) {
rc = -ENOMEM;
- goto err_out_ha;
+ goto err_out_free_host;
}
sha->sas_phy = arr_phy;
@@ -4759,22 +4838,24 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rc = interrupt_preinit_v3_hw(hisi_hba);
if (rc)
- goto err_out_debugfs;
+ goto err_out_undo_debugfs;
rc = scsi_add_host(shost, dev);
if (rc)
- goto err_out_free_irq_vectors;
+ goto err_out_undo_debugfs;
rc = sas_register_ha(sha);
if (rc)
- goto err_out_register_ha;
+ goto err_out_remove_host;
rc = hisi_sas_v3_init(hisi_hba);
if (rc)
- goto err_out_hw_init;
+ goto err_out_unregister_ha;
scsi_scan_host(shost);
+ pm_runtime_set_autosuspend_delay(dev, 5000);
+ pm_runtime_use_autosuspend(dev);
/*
* For the situation that there are ATA disks connected with SAS
* controller, it additionally creates ata_port which will affect the
@@ -4788,15 +4869,13 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
-err_out_hw_init:
+err_out_unregister_ha:
sas_unregister_ha(sha);
-err_out_register_ha:
+err_out_remove_host:
scsi_remove_host(shost);
-err_out_free_irq_vectors:
- pci_free_irq_vectors(pdev);
-err_out_debugfs:
+err_out_undo_debugfs:
debugfs_exit_v3_hw(hisi_hba);
-err_out_ha:
+err_out_free_host:
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
err_out:
@@ -4817,7 +4896,6 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq);
}
- pci_free_irq_vectors(pdev);
}
static void hisi_sas_v3_remove(struct pci_dev *pdev)
@@ -4848,6 +4926,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
int rc;
dev_info(dev, "FLR prepare\n");
+ down(&hisi_hba->sem);
set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
@@ -4897,6 +4976,8 @@ static int _suspend_v3_hw(struct device *device)
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
+ dev_warn(dev, "entering suspend state\n");
+
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
@@ -4912,11 +4993,11 @@ static int _suspend_v3_hw(struct device *device)
hisi_sas_init_mem(hisi_hba);
- dev_warn(dev, "entering suspend state\n");
-
hisi_sas_release_tasks(hisi_hba);
sas_suspend_ha(sha);
+
+ dev_warn(dev, "end of suspending controller\n");
return 0;
}
@@ -4943,9 +5024,19 @@ static int _resume_v3_hw(struct device *device)
return rc;
}
phys_init_v3_hw(hisi_hba);
- sas_resume_ha(sha);
+
+ /*
+ * If a directly-attached disk is removed during suspend, a deadlock
+ * may occur, as the PHYE_RESUME_TIMEOUT processing will require the
+ * hisi_hba->device to be active, which can only happen when resume
+ * completes. So don't wait for the HA event workqueue to drain upon
+ * resume.
+ */
+ sas_resume_ha_no_sync(sha);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ dev_warn(dev, "end of resuming controller\n");
+
return 0;
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 8049b00b6766..9857dba09c95 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -61,6 +61,7 @@ static void scsi_host_cls_release(struct device *dev)
static struct class shost_class = {
.name = "scsi_host",
.dev_release = scsi_host_cls_release,
+ .dev_groups = scsi_shost_groups,
};
/**
@@ -181,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
+ /*
+ * New SCSI devices can no longer be attached because of the SCSI host
+ * state, so drop the tag set refcnt. Wait until the refcnt drops to
+ * zero, because .exit_cmd_priv implementations may need the host
+ * pointer.
+ */
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
+ wait_for_completion(&shost->tagset_freed);
+
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
@@ -228,10 +238,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto fail;
- error = scsi_mq_setup_tags(shost);
- if (error)
- goto fail;
-
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
if (!dma_dev)
@@ -239,6 +245,18 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
shost->dma_dev = dma_dev;
+ if (dma_dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
+ error = scsi_mq_setup_tags(shost);
+ if (error)
+ goto fail;
+
+ kref_init(&shost->tagset_refcnt);
+ init_completion(&shost->tagset_freed);
+
/*
* Increase usage count temporarily here so that calling
* scsi_autopm_put_host() will trigger runtime idle if there is
@@ -311,6 +329,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
fail:
return error;
}
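[Editorial sketch] The kref/completion pairing above implies a release callback of roughly this shape. scsi_mq_free_tags() is referenced by the hunks but its body is not part of this diff, so the following is an assumed sketch, not the actual implementation.

void scsi_mq_free_tags(struct kref *kref)
{
	struct Scsi_Host *shost = container_of(kref, struct Scsi_Host,
					       tagset_refcnt);

	blk_mq_free_tag_set(&shost->tag_set);
	complete(&shost->tagset_freed);	/* unblocks scsi_remove_host() */
}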
@@ -344,12 +363,9 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- if (shost->tag_set.tags)
- scsi_mq_destroy_tags(shost);
-
kfree(shost->shost_data);
- ida_simple_remove(&host_index_ida, shost->host_no);
+ ida_free(&host_index_ida, shost->host_no);
if (shost->shost_state != SHOST_CREATED)
put_device(parent);
@@ -377,7 +393,7 @@ static struct device_type scsi_host_type = {
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
- int index, i, j = 0;
+ int index;
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
if (!shost)
@@ -394,7 +410,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+ index = ida_alloc(&host_index_ida, GFP_KERNEL);
if (index < 0) {
kfree(shost);
return NULL;
@@ -483,17 +499,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
- shost->shost_dev.groups = shost->shost_dev_attr_groups;
- shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group;
- if (sht->shost_groups) {
- for (i = 0; sht->shost_groups[i] &&
- j < ARRAY_SIZE(shost->shost_dev_attr_groups);
- i++, j++) {
- shost->shost_dev_attr_groups[j] =
- sht->shost_groups[i];
- }
- }
- WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups));
+ shost->shost_dev.groups = sht->shost_groups;
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
@@ -575,8 +581,7 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);
-static bool scsi_host_check_in_flight(struct request *rq, void *data,
- bool reserved)
+static bool scsi_host_check_in_flight(struct request *rq, void *data)
{
int *count = data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -671,7 +676,7 @@ void scsi_flush_work(struct Scsi_Host *shost)
}
EXPORT_SYMBOL_GPL(scsi_flush_work);
-static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
+static bool complete_all_cmds_iter(struct request *rq, void *data)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
enum scsi_host_status status = *(enum scsi_host_status *)data;
@@ -702,17 +707,16 @@ void scsi_host_complete_all_commands(struct Scsi_Host *shost,
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
struct scsi_host_busy_iter_data {
- bool (*fn)(struct scsi_cmnd *, void *, bool);
+ bool (*fn)(struct scsi_cmnd *, void *);
void *priv;
};
-static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
- bool reserved)
+static bool __scsi_host_busy_iter_fn(struct request *req, void *priv)
{
struct scsi_host_busy_iter_data *iter_data = priv;
struct scsi_cmnd *sc = blk_mq_rq_to_pdu(req);
- return iter_data->fn(sc, iter_data->priv, reserved);
+ return iter_data->fn(sc, iter_data->priv);
}
/**
@@ -725,7 +729,7 @@ static bool __scsi_host_busy_iter_fn(struct request *req, void *priv,
* it has to be provided by the caller
**/
void scsi_host_busy_iter(struct Scsi_Host *shost,
- bool (*fn)(struct scsi_cmnd *, void *, bool),
+ bool (*fn)(struct scsi_cmnd *, void *),
void *priv)
{
struct scsi_host_busy_iter_data iter_data = {
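[Editorial sketch] Callers of scsi_host_busy_iter() lose the blk-mq "reserved" argument with this change; a minimal illustrative callback in the new two-argument form.

static bool example_count_busy(struct scsi_cmnd *scmd, void *priv)
{
	unsigned int *count = priv;

	(*count)++;
	return true;	/* true: keep iterating over busy commands */
}

/* usage: scsi_host_busy_iter(shost, example_count_busy, &count); */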
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index cdf3328cc065..f8e832b1bc46 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4354,7 +4354,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
int i, ndevs_to_allocate;
int raid_ctlr_position;
bool physical_device;
- DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
@@ -4368,7 +4367,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
- memset(lunzerobits, 0, sizeof(lunzerobits));
h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
@@ -6235,8 +6233,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
offset = (i + 1) % HPSA_NRESERVED_CMDS;
continue;
}
- set_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ set_bit(i, h->cmd_pool_bits);
break; /* it's ours now. */
}
hpsa_cmd_partial_init(h, i, c);
@@ -6263,8 +6260,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
int i;
i = c - h->cmd_pool;
- clear_bit(i & (BITS_PER_LONG - 1),
- h->cmd_pool_bits + (i / BITS_PER_LONG));
+ clear_bit(i, h->cmd_pool_bits);
}
}
@@ -8032,7 +8028,7 @@ out_disable:
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
- kfree(h->cmd_pool_bits);
+ bitmap_free(h->cmd_pool_bits);
h->cmd_pool_bits = NULL;
if (h->cmd_pool) {
dma_free_coherent(&h->pdev->dev,
@@ -8054,9 +8050,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
- h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
- sizeof(unsigned long),
- GFP_KERNEL);
+ h->cmd_pool_bits = bitmap_zalloc(h->nr_cmds, GFP_KERNEL);
h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->cmd_pool),
&h->cmd_pool_dhandle, GFP_KERNEL);
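
The hpsa conversion swaps open-coded word/bit arithmetic for the bitmap API. A sketch of the equivalence, assuming the standard <linux/bitmap.h> helpers, with nr_cmds and i standing in for the driver's values:

    /* bitmap_zalloc(nbits, gfp) allocates and zeroes enough unsigned
     * longs to hold nbits bits; bitmap_free() releases them. */
    unsigned long *bits = bitmap_zalloc(nr_cmds, GFP_KERNEL);

    if (!bits)
            return -ENOMEM;

    /* set_bit()/clear_bit() take a flat bit index, so the old
     * set_bit(i & (BITS_PER_LONG - 1), bits + i / BITS_PER_LONG)
     * split into word and offset is redundant. */
    set_bit(i, bits);
    clear_bit(i, bits);
    bitmap_free(bits);
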
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index d04245e379d7..7e8903718245 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1044,10 +1044,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
req->channel = scp->device->channel;
req->target = scp->device->id;
req->lun = scp->device->lun;
- req->header.size = cpu_to_le32(
- sizeof(struct hpt_iop_request_scsi_command)
- - sizeof(struct hpt_iopsg)
- + sg_count * sizeof(struct hpt_iopsg));
+ req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count));
memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
hba->ops->post_req(hba, _req);
@@ -1174,6 +1171,7 @@ static struct scsi_host_template driver_template = {
.slave_configure = hptiop_slave_config,
.this_id = -1,
.change_queue_depth = hptiop_adjust_disk_queue_depth,
+ .cmd_size = sizeof(struct hpt_cmd_priv),
};
static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
@@ -1396,8 +1394,8 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
host->max_cmd_len = 16;
- req_size = sizeof(struct hpt_iop_request_scsi_command)
- + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
+ req_size = struct_size((struct hpt_iop_request_scsi_command *)0,
+ sg_list, hba->max_sg_descriptors);
if ((req_size & 0x1f) != 0)
req_size = (req_size + 0x1f) & ~0x1f;
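
struct_size() from <linux/overflow.h> computes the size of a structure with a trailing flexible array, saturating rather than wrapping on overflow; the hptiop.h hunks below turn sg_list into such an array. A sketch, using a hypothetical hpt_req_bytes() helper:

    /* struct_size(req, sg_list, n) evaluates to
     *         sizeof(*req) + n * sizeof(req->sg_list[0])
     * and, since sizeof(*req) counts no array elements once sg_list is
     * a flexible array, the old "- sizeof(struct hpt_iopsg)" correction
     * disappears. */
    static u32 hpt_req_bytes(struct hpt_iop_request_scsi_command *req,
                             u32 sg_count)
    {
            return struct_size(req, sg_list, sg_count);
    }
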
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 35184c2008af..394ef6aa469e 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -228,7 +228,7 @@ struct hpt_iop_request_scsi_command {
u8 pad1;
u8 cdb[16];
__le32 dataxfer_length;
- struct hpt_iopsg sg_list[1];
+ struct hpt_iopsg sg_list[];
};
struct hpt_iop_request_ioctl_command {
@@ -237,7 +237,7 @@ struct hpt_iop_request_ioctl_command {
__le32 inbuf_size;
__le32 outbuf_size;
__le32 bytes_returned;
- u8 buf[1];
+ u8 buf[];
/* out data should be put at buf[(inbuf_size+3)&~3] */
};
@@ -251,13 +251,13 @@ struct hptiop_request {
int index;
};
-struct hpt_scsi_pointer {
+struct hpt_cmd_priv {
int mapped;
int sgcnt;
dma_addr_t dma_handle;
};
-#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
+#define HPT_SCP(scp) ((struct hpt_cmd_priv *)scsi_cmd_priv(scp))
enum hptiop_family {
UNKNOWN_BASED_IOP,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index d0eab5700dc5..1a0c0b7289d2 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
-static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
-static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);
static const char *unknown_error = "unknown error";
@@ -708,8 +708,13 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
vhost->async_crq.cur = 0;
- list_for_each_entry(tgt, &vhost->targets, queue)
- ibmvfc_del_tgt(tgt);
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (vhost->client_migrated)
+ tgt->need_login = 1;
+ else
+ ibmvfc_del_tgt(tgt);
+ }
+
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
@@ -917,7 +922,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
unsigned long flags;
- ibmvfc_release_sub_crqs(vhost);
+ ibmvfc_dereg_sub_crqs(vhost);
/* Re-enable the CRQ */
do {
@@ -936,7 +941,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_init_sub_crqs(vhost);
+ ibmvfc_reg_sub_crqs(vhost);
return rc;
}
@@ -955,7 +960,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_queue *crq = &vhost->crq;
- ibmvfc_release_sub_crqs(vhost);
+ ibmvfc_dereg_sub_crqs(vhost);
/* Close the CRQ */
do {
@@ -988,7 +993,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_init_sub_crqs(vhost);
+ ibmvfc_reg_sub_crqs(vhost);
return rc;
}
@@ -3235,9 +3240,12 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
/* We need to re-setup the interpartition connection */
dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
vhost->client_migrated = 1;
+
+ scsi_block_requests(vhost->host);
ibmvfc_purge_requests(vhost, DID_REQUEUE);
- ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+ ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
+ wake_up(&vhost->work_wait_q);
} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
@@ -5682,6 +5690,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
queue->cur = 0;
queue->fmt = fmt;
queue->size = PAGE_SIZE / fmt_size;
+
+ queue->vhost = vhost;
return 0;
}
@@ -5757,9 +5767,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
ENTER;
- if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
- return -ENOMEM;
-
rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
&scrq->cookie, &scrq->hw_irq);
@@ -5790,7 +5797,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
}
scrq->hwq_id = index;
- scrq->vhost = vhost;
LEAVE;
return 0;
@@ -5800,7 +5806,6 @@ irq_failed:
rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
} while (rtas_busy_delay(rc));
reg_failed:
- ibmvfc_free_queue(vhost, scrq);
LEAVE;
return rc;
}
@@ -5826,12 +5831,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
if (rc)
dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
- ibmvfc_free_queue(vhost, scrq);
+ /* Clean out the queue */
+ memset(scrq->msgs.crq, 0, PAGE_SIZE);
+ scrq->cur = 0;
+
+ LEAVE;
+}
+
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
+{
+ int i, j;
+
+ ENTER;
+ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+ return;
+
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ if (ibmvfc_register_scsi_channel(vhost, i)) {
+ for (j = i; j > 0; j--)
+ ibmvfc_deregister_scsi_channel(vhost, j - 1);
+ vhost->do_enquiry = 0;
+ return;
+ }
+ }
+
+ LEAVE;
+}
+
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
+{
+ int i;
+
+ ENTER;
+ if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+ return;
+
+ for (i = 0; i < nr_scsi_hw_queues; i++)
+ ibmvfc_deregister_scsi_channel(vhost, i);
+
LEAVE;
}
static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
{
+ struct ibmvfc_queue *scrq;
int i, j;
ENTER;
@@ -5847,30 +5890,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
}
for (i = 0; i < nr_scsi_hw_queues; i++) {
- if (ibmvfc_register_scsi_channel(vhost, i)) {
- for (j = i; j > 0; j--)
- ibmvfc_deregister_scsi_channel(vhost, j - 1);
+ scrq = &vhost->scsi_scrqs.scrqs[i];
+ if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
+ for (j = i; j > 0; j--) {
+ scrq = &vhost->scsi_scrqs.scrqs[j - 1];
+ ibmvfc_free_queue(vhost, scrq);
+ }
kfree(vhost->scsi_scrqs.scrqs);
vhost->scsi_scrqs.scrqs = NULL;
vhost->scsi_scrqs.active_queues = 0;
vhost->do_enquiry = 0;
- break;
+ vhost->mq_enabled = 0;
+ return;
}
}
+ ibmvfc_reg_sub_crqs(vhost);
+
LEAVE;
}
static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
{
+ struct ibmvfc_queue *scrq;
int i;
ENTER;
if (!vhost->scsi_scrqs.scrqs)
return;
- for (i = 0; i < nr_scsi_hw_queues; i++)
- ibmvfc_deregister_scsi_channel(vhost, i);
+ ibmvfc_dereg_sub_crqs(vhost);
+
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ scrq = &vhost->scsi_scrqs.scrqs[i];
+ ibmvfc_free_queue(vhost, scrq);
+ }
kfree(vhost->scsi_scrqs.scrqs);
vhost->scsi_scrqs.scrqs = NULL;
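
Net effect of the ibmvfc rework: sub-CRQ memory is allocated once in ibmvfc_init_sub_crqs() and freed once in ibmvfc_release_sub_crqs(), while the reset and re-enable paths only bounce the hypervisor registration, so a transient allocation failure can no longer wreck an adapter reset. A sketch of the split, with hypothetical helper names:

    static int init_queues(struct ibmvfc_host *vhost)
    {
            if (alloc_queue_pages(vhost))   /* memory: exactly once */
                    return -ENOMEM;
            reg_queues(vhost);              /* hypervisor: repeatable */
            return 0;
    }

    static void reset_adapter(struct ibmvfc_host *vhost)
    {
            dereg_queues(vhost);            /* pages survive the reset */
            /* ... close and reopen the main CRQ ... */
            reg_queues(vhost);              /* no allocations here */
    }
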
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 3718406e0988..c39a245f43d0 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -789,6 +789,7 @@ struct ibmvfc_queue {
spinlock_t _lock;
spinlock_t *q_lock;
+ struct ibmvfc_host *vhost;
struct ibmvfc_event_pool evt_pool;
struct list_head sent;
struct list_head free;
@@ -797,7 +798,6 @@ struct ibmvfc_queue {
union ibmvfc_iu cancel_rsp;
/* Sub-CRQ fields */
- struct ibmvfc_host *vhost;
unsigned long cookie;
unsigned long vios_cookie;
unsigned long hw_irq;
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 61f06f6885a5..e8770310a64b 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -36,7 +36,7 @@
#define IBMVSCSIS_VERSION "v0.2"
-#define INITIAL_SRP_LIMIT 800
+#define INITIAL_SRP_LIMIT 1024
#define DEFAULT_MAX_SECTORS 256
#define MAX_TXU 1024 * 1024
@@ -444,7 +444,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
break;
/*
- * Can transition from this state to to unconfiguring
+ * Can transition from this state to unconfiguring
* or err disconnect.
*/
case ERR_DISCONNECT_RECONNECT:
@@ -1872,11 +1872,8 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
*/
static void ibmvscsis_send_messages(struct scsi_info *vscsi)
{
- u64 msg_hi = 0;
- /* note do not attempt to access the IU_data_ptr with this pointer
- * it is not valid
- */
- struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
+ struct viosrp_crq empty_crq = { };
+ struct viosrp_crq *crq = &empty_crq;
struct ibmvscsis_cmd *cmd, *nxt;
long rc = ADAPT_SUCCESS;
bool retry = false;
@@ -1940,7 +1937,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
crq->IU_length = cpu_to_be16(cmd->rsp.len);
rc = h_send_crq(vscsi->dma_dev->unit_address,
- be64_to_cpu(msg_hi),
+ be64_to_cpu(crq->high),
be64_to_cpu(cmd->rsp.tag));
dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 8afdb4dba2be..7a499d621c25 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -66,7 +66,7 @@ static void got_it(imm_struct *dev)
{
dev->base = dev->dev->port->base;
if (dev->cur_cmd)
- dev->cur_cmd->SCp.phase = 1;
+ imm_scsi_pointer(dev->cur_cmd)->phase = 1;
else
wake_up(dev->waiting);
}
@@ -618,13 +618,14 @@ static inline int imm_send_command(struct scsi_cmnd *cmd)
* The driver appears to remain stable if we speed up the parallel port
* i/o in this function, but not elsewhere.
*/
-static int imm_completion(struct scsi_cmnd *cmd)
+static int imm_completion(struct scsi_cmnd *const cmd)
{
/* Return codes:
* -1 Error
* 0 Told to schedule
* 1 Finished data transfer
*/
+ struct scsi_pointer *scsi_pointer = imm_scsi_pointer(cmd);
imm_struct *dev = imm_dev(cmd->device->host);
unsigned short ppb = dev->base;
unsigned long start_jiffies = jiffies;
@@ -660,44 +661,43 @@ static int imm_completion(struct scsi_cmnd *cmd)
* a) Drive status is screwy (!ready && !present)
* b) Drive is requesting/sending more data than expected
*/
- if (((r & 0x88) != 0x88) || (cmd->SCp.this_residual <= 0)) {
+ if ((r & 0x88) != 0x88 || scsi_pointer->this_residual <= 0) {
imm_fail(dev, DID_ERROR);
return -1; /* ERROR_RETURN */
}
/* determine if we should use burst I/O */
if (dev->rd == 0) {
- fast = (bulk
- && (cmd->SCp.this_residual >=
- IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 2;
- status = imm_out(dev, cmd->SCp.ptr, fast);
+ fast = bulk && scsi_pointer->this_residual >=
+ IMM_BURST_SIZE ? IMM_BURST_SIZE : 2;
+ status = imm_out(dev, scsi_pointer->ptr, fast);
} else {
- fast = (bulk
- && (cmd->SCp.this_residual >=
- IMM_BURST_SIZE)) ? IMM_BURST_SIZE : 1;
- status = imm_in(dev, cmd->SCp.ptr, fast);
+ fast = bulk && scsi_pointer->this_residual >=
+ IMM_BURST_SIZE ? IMM_BURST_SIZE : 1;
+ status = imm_in(dev, scsi_pointer->ptr, fast);
}
- cmd->SCp.ptr += fast;
- cmd->SCp.this_residual -= fast;
+ scsi_pointer->ptr += fast;
+ scsi_pointer->this_residual -= fast;
if (!status) {
imm_fail(dev, DID_BUS_BUSY);
return -1; /* ERROR_RETURN */
}
- if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ if (scsi_pointer->buffer && !scsi_pointer->this_residual) {
/* if scatter/gather, advance to the next segment */
- if (cmd->SCp.buffers_residual--) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- cmd->SCp.this_residual =
- cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (scsi_pointer->buffers_residual--) {
+ scsi_pointer->buffer =
+ sg_next(scsi_pointer->buffer);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
/*
* Make sure that we transfer an even number of bytes,
* otherwise it makes imm_byte_out() messy.
*/
- if (cmd->SCp.this_residual & 0x01)
- cmd->SCp.this_residual++;
+ if (scsi_pointer->this_residual & 0x01)
+ scsi_pointer->this_residual++;
}
}
/* Now check to see if the drive is ready to communicate */
@@ -762,7 +762,7 @@ static void imm_interrupt(struct work_struct *work)
}
#endif
- if (cmd->SCp.phase > 1)
+ if (imm_scsi_pointer(cmd)->phase > 1)
imm_disconnect(dev);
imm_pb_dismiss(dev);
@@ -774,8 +774,9 @@ static void imm_interrupt(struct work_struct *work)
return;
}
-static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
+static int imm_engine(imm_struct *dev, struct scsi_cmnd *const cmd)
{
+ struct scsi_pointer *scsi_pointer = imm_scsi_pointer(cmd);
unsigned short ppb = dev->base;
unsigned char l = 0, h = 0;
int retv, x;
@@ -786,7 +787,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (dev->failed)
return 0;
- switch (cmd->SCp.phase) {
+ switch (scsi_pointer->phase) {
case 0: /* Phase 0 - Waiting for parport */
if (time_after(jiffies, dev->jstart + HZ)) {
/*
@@ -800,7 +801,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
case 1: /* Phase 1 - Connected */
imm_connect(dev, CONNECT_EPP_MAYBE);
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 2: /* Phase 2 - We are now talking to the scsi bus */
@@ -808,7 +809,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
imm_fail(dev, DID_NO_CONNECT);
return 0;
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
@@ -818,23 +819,23 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (!imm_send_command(cmd))
return 0;
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.this_residual = 0;
- cmd->SCp.ptr = NULL;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->ptr = NULL;
}
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.phase++;
- if (cmd->SCp.this_residual & 0x01)
- cmd->SCp.this_residual++;
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->phase++;
+ if (scsi_pointer->this_residual & 0x01)
+ scsi_pointer->this_residual++;
fallthrough;
case 5: /* Phase 5 - Pre-Data transfer stage */
@@ -851,7 +852,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if ((dev->dp) && (dev->rd))
if (imm_negotiate(dev))
return 0;
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 6: /* Phase 6 - Data transfer stage */
@@ -867,7 +868,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
if (retv == 0)
return 1;
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 7: /* Phase 7 - Post data transfer stage */
@@ -879,7 +880,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
w_ctr(ppb, 0x4);
}
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 8: /* Phase 8 - Read status/message */
@@ -922,7 +923,7 @@ static int imm_queuecommand_lck(struct scsi_cmnd *cmd)
dev->jstart = jiffies;
dev->cur_cmd = cmd;
cmd->result = DID_ERROR << 16; /* default return code */
- cmd->SCp.phase = 0; /* bus free */
+ imm_scsi_pointer(cmd)->phase = 0; /* bus free */
schedule_delayed_work(&dev->imm_tq, 0);
@@ -961,7 +962,7 @@ static int imm_abort(struct scsi_cmnd *cmd)
* have tied the SCSI_MESSAGE line high in the interface
*/
- switch (cmd->SCp.phase) {
+ switch (imm_scsi_pointer(cmd)->phase) {
case 0: /* Do not have access to parport */
case 1: /* Have not connected to interface */
dev->cur_cmd = NULL; /* Forget the problem */
@@ -987,7 +988,7 @@ static int imm_reset(struct scsi_cmnd *cmd)
{
imm_struct *dev = imm_dev(cmd->device->host);
- if (cmd->SCp.phase)
+ if (imm_scsi_pointer(cmd)->phase)
imm_disconnect(dev);
dev->cur_cmd = NULL; /* Forget the problem */
@@ -1109,6 +1110,7 @@ static struct scsi_host_template imm_template = {
.sg_tablesize = SG_ALL,
.can_queue = 1,
.slave_alloc = imm_adjust_queue,
+ .cmd_size = sizeof(struct scsi_pointer),
};
/***************************************************************************
diff --git a/drivers/scsi/imm.h b/drivers/scsi/imm.h
index 7f2bb35b1b87..411cf94af5b0 100644
--- a/drivers/scsi/imm.h
+++ b/drivers/scsi/imm.h
@@ -139,6 +139,11 @@ static char *IMM_MODE_STRING[] =
#define w_ctr(x,y) outb(y, (x)+2)
#endif
+static inline struct scsi_pointer *imm_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static int imm_engine(imm_struct *, struct scsi_cmnd *);
#endif /* _IMM_H */
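
imm is one of several drivers in this series moved off the removed cmd->SCp scratch area: the host template declares .cmd_size, the midlayer allocates that many extra bytes behind every scsi_cmnd, and scsi_cmd_priv() returns them. The general pattern, sketched for a hypothetical driver:

    struct my_cmd_priv {                    /* hypothetical per-command state */
            int phase;
    };

    static inline struct my_cmd_priv *my_priv(struct scsi_cmnd *cmd)
    {
            return scsi_cmd_priv(cmd);      /* memory just past the scsi_cmnd */
    }

    static struct scsi_host_template my_template = {
            .name           = "my_driver",
            .cmd_size       = sizeof(struct my_cmd_priv),
    };
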
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index fd6da96bc51a..375261d67619 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -1166,7 +1166,7 @@ static void tulip_scsi(struct initio_host * host)
return;
}
if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
- if ((scb = host->active) != NULL)
+ if (host->active)
initio_next_state(host);
return;
}
@@ -2553,7 +2553,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
SENSE_SIZE, DMA_FROM_DEVICE);
cblk->senseptr = (u32)dma_addr;
cblk->senselen = SENSE_SIZE;
- cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
+ initio_priv(cmnd)->sense_dma_addr = dma_addr;
cblk->cdblen = cmnd->cmd_len;
/* Clear the returned status */
@@ -2577,7 +2577,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
cblk->bufptr = (u32)dma_addr;
- cmnd->SCp.dma_handle = dma_addr;
+ initio_priv(cmnd)->sglist_dma_addr = dma_addr;
cblk->sglen = nseg;
@@ -2602,13 +2602,11 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
/**
* i91u_queuecommand_lck - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
- * @done: Completion handler
*
* Attempts to queue a new command with the host adapter. Will return
* zero if successful or indicate a host busy condition if not (which
* will cause the mid layer to call us again later with the command)
*/
-
static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
@@ -2706,16 +2704,17 @@ static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
{
/* auto sense buffer */
- if (cmnd->SCp.ptr) {
+ if (initio_priv(cmnd)->sense_dma_addr) {
dma_unmap_single(&pci_dev->dev,
- (dma_addr_t)((unsigned long)cmnd->SCp.ptr),
+ initio_priv(cmnd)->sense_dma_addr,
SENSE_SIZE, DMA_FROM_DEVICE);
- cmnd->SCp.ptr = NULL;
+ initio_priv(cmnd)->sense_dma_addr = 0;
}
/* request buffer */
if (scsi_sg_count(cmnd)) {
- dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
+ dma_unmap_single(&pci_dev->dev,
+ initio_priv(cmnd)->sglist_dma_addr,
sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
DMA_BIDIRECTIONAL);
@@ -2798,6 +2797,7 @@ static struct scsi_host_template initio_template = {
.can_queue = MAX_TARGETS * i91u_MAXQUEUE,
.this_id = 1,
.sg_tablesize = SG_ALL,
+ .cmd_size = sizeof(struct initio_cmd_priv),
};
static int initio_probe_one(struct pci_dev *pdev,
@@ -2849,7 +2849,8 @@ static int initio_probe_one(struct pci_dev *pdev,
for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
i = num_scb * sizeof(struct scsi_ctrl_blk);
- if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+ scb = kzalloc(i, GFP_KERNEL);
+ if (scb)
break;
}
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index 9fd010cf1f8a..7c9741552654 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -640,3 +640,12 @@ typedef struct _NVRAM {
#define SCSI_RESET_HOST_RESET 0x200
#define SCSI_RESET_ACTION 0xff
+struct initio_cmd_priv {
+ dma_addr_t sense_dma_addr;
+ dma_addr_t sglist_dma_addr;
+};
+
+static inline struct initio_cmd_priv *initio_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 104bee9b3a9d..9d01a3e3c26a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3456,7 +3456,7 @@ static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags = 0;
@@ -4182,7 +4182,7 @@ static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct device *cdev = container_of(kobj, struct device, kobj);
+ struct device *cdev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_hostrcb *hostrcb;
@@ -4206,7 +4206,7 @@ static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct device *cdev = container_of(kobj, struct device, kobj);
+ struct device *cdev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_hostrcb *hostrcb;
@@ -4267,7 +4267,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *cdev = container_of(kobj, struct device, kobj);
+ struct device *cdev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
struct ipr_dump *dump;
@@ -4456,7 +4456,7 @@ static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *cdev = container_of(kobj, struct device, kobj);
+ struct device *cdev = kobj_to_dev(kobj);
struct Scsi_Host *shost = class_to_shost(cdev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
int rc;
@@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
GFP_KERNEL);
if (!ioa_cfg->hrrq[i].host_rrq) {
- while (--i > 0)
+ while (--i >= 0)
dma_free_coherent(&pdev->dev,
sizeof(u32) * ioa_cfg->hrrq[i].size,
ioa_cfg->hrrq[i].host_rrq,
@@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->vectors_info[i].desc,
&ioa_cfg->hrrq[i]);
if (rc) {
- while (--i >= 0)
+ while (--i > 0)
free_irq(pci_irq_vector(pdev, i),
&ioa_cfg->hrrq[i]);
return rc;
@@ -10092,7 +10092,6 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
unsigned long lock_flags = 0;
- irqreturn_t rc = IRQ_HANDLED;
dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -10101,7 +10100,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
wake_up(&ioa_cfg->msi_wait_q);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return rc;
+ return IRQ_HANDLED;
}
/**
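
The two ipr loop-bound fixes are mirror images: hrrq allocation starts at index 0, so its error path must free i-1 down to 0 (--i >= 0), while the extra-MSI path requests vectors starting at index 1 and must stop unwinding before vector 0 (--i > 0). The generic unwind idiom, sketched with hypothetical alloc_one()/free_one() helpers:

    for (i = first; i < n; i++)
            if (alloc_one(i))
                    goto unwind;
    return 0;

    unwind:
    while (--i >= first)    /* frees i-1 down to first, nothing below */
            free_one(i);
    return -ENOMEM;

With first == 0 this is ipr's "--i >= 0"; with first == 1 it reduces to the "--i > 0" form.
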
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 498bf04499ce..16419aeec02d 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -180,9 +180,13 @@
#include <linux/types.h>
#include <linux/dma-mapping.h>
-#include <scsi/sg.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sg.h>
#include "ips.h"
@@ -638,8 +642,7 @@ ips_setup_funclist(ips_ha_t * ha)
/* Remove a driver */
/* */
/****************************************************************************/
-static int
-ips_release(struct Scsi_Host *sh)
+static void ips_release(struct Scsi_Host *sh)
{
ips_scb_t *scb;
ips_ha_t *ha;
@@ -655,13 +658,12 @@ ips_release(struct Scsi_Host *sh)
printk(KERN_WARNING
"(%s) release, invalid Scsi_Host pointer.\n", ips_name);
BUG();
- return (FALSE);
}
ha = IPS_HA(sh);
if (!ha)
- return (FALSE);
+ return;
/* flush the cache on the controller */
scb = &ha->scbs[ha->max_cmds - 1];
@@ -699,8 +701,6 @@ ips_release(struct Scsi_Host *sh)
scsi_host_put(sh);
ips_released_controllers++;
-
- return (FALSE);
}
/****************************************************************************/
@@ -949,7 +949,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
scsi_done(scsi_cmd);
}
- ha->active = FALSE;
+ ha->active = false;
return (FAILED);
}
@@ -978,7 +978,7 @@ static int __ips_eh_reset(struct scsi_cmnd *SC)
scsi_done(scsi_cmd);
}
- ha->active = FALSE;
+ ha->active = false;
return (FAILED);
}
@@ -1291,7 +1291,7 @@ ips_intr_copperhead(ips_ha_t * ha)
return 0;
}
- while (TRUE) {
+ while (true) {
sp = &ha->sp;
intrstatus = (*ha->func.isintr) (ha);
@@ -1355,7 +1355,7 @@ ips_intr_morpheus(ips_ha_t * ha)
return 0;
}
- while (TRUE) {
+ while (true) {
sp = &ha->sp;
intrstatus = (*ha->func.isintr) (ha);
@@ -3090,8 +3090,8 @@ ipsintr_blocking(ips_ha_t * ha, ips_scb_t * scb)
METHOD_TRACE("ipsintr_blocking", 2);
ips_freescb(ha, scb);
- if ((ha->waitflag == TRUE) && (ha->cmd_in_progress == scb->cdb[0])) {
- ha->waitflag = FALSE;
+ if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0]) {
+ ha->waitflag = false;
return;
}
@@ -3391,7 +3391,7 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
METHOD_TRACE("ips_send_wait", 1);
if (intr != IPS_FFDC) { /* Won't be Waiting if this is a Time Stamp */
- ha->waitflag = TRUE;
+ ha->waitflag = true;
ha->cmd_in_progress = scb->cdb[0];
}
scb->callback = ipsintr_blocking;
@@ -3468,10 +3468,8 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
if (scb->bus > 0) {
/* Controller commands can't be issued */
/* to real devices -- fail them */
- if ((ha->waitflag == TRUE) &&
- (ha->cmd_in_progress == scb->cdb[0])) {
- ha->waitflag = FALSE;
- }
+ if (ha->waitflag && ha->cmd_in_progress == scb->cdb[0])
+ ha->waitflag = false;
return (1);
}
@@ -4619,7 +4617,7 @@ ips_poll_for_flush_complete(ips_ha_t * ha)
{
IPS_STATUS cstatus;
- while (TRUE) {
+ while (true) {
cstatus.value = (*ha->func.statupd) (ha);
if (cstatus.value == 0xffffffff) /* If No Interrupt to process */
@@ -5542,26 +5540,26 @@ ips_wait(ips_ha_t * ha, int time, int intr)
METHOD_TRACE("ips_wait", 1);
ret = IPS_FAILURE;
- done = FALSE;
+ done = false;
time *= IPS_ONE_SEC; /* convert seconds */
while ((time > 0) && (!done)) {
if (intr == IPS_INTR_ON) {
- if (ha->waitflag == FALSE) {
+ if (!ha->waitflag) {
ret = IPS_SUCCESS;
- done = TRUE;
+ done = true;
break;
}
} else if (intr == IPS_INTR_IORL) {
- if (ha->waitflag == FALSE) {
+ if (!ha->waitflag) {
/*
* controller generated an interrupt to
* acknowledge completion of the command
* and ips_intr() has serviced the interrupt.
*/
ret = IPS_SUCCESS;
- done = TRUE;
+ done = true;
break;
}
@@ -5596,7 +5594,7 @@ ips_write_driver_status(ips_ha_t * ha, int intr)
{
METHOD_TRACE("ips_write_driver_status", 1);
- if (!ips_readwrite_page5(ha, FALSE, intr)) {
+ if (!ips_readwrite_page5(ha, false, intr)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"unable to read NVRAM page 5.\n");
@@ -5634,7 +5632,7 @@ ips_write_driver_status(ips_ha_t * ha, int intr)
ha->nvram->versioning = 0; /* Indicate the Driver Does Not Support Versioning */
/* now update the page */
- if (!ips_readwrite_page5(ha, TRUE, intr)) {
+ if (!ips_readwrite_page5(ha, true, intr)) {
IPS_PRINTK(KERN_WARNING, ha->pcidev,
"unable to write NVRAM page 5.\n");
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d690d9cf7eb1..35589b6af90d 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -413,7 +413,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for io request object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -428,7 +428,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
dev_warn(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received "
"event 0x%x for remote device object "
- "that doesnt exist.\n",
+ "that doesn't exist.\n",
__func__,
ihost,
ent);
@@ -462,7 +462,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
} else
dev_err(&ihost->pdev->dev,
"%s: SCIC Controller 0x%p received event 0x%x "
- "for remote device object 0x%0x that doesnt "
+ "for remote device object 0x%0x that doesn't "
"exist.\n",
__func__,
ihost,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index aade707c5553..e294d5d961eb 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -193,7 +193,6 @@ static struct sas_domain_function_template isci_transport_ops = {
/* Task Management Functions. Must be called from process context. */
.lldd_abort_task = isci_task_abort_task,
.lldd_abort_task_set = isci_task_abort_task_set,
- .lldd_clear_aca = isci_task_clear_aca,
.lldd_clear_task_set = isci_task_clear_task_set,
.lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
.lldd_lu_reset = isci_task_lu_reset,
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index fcaa84a3c210..6370cdbfba08 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1047,7 +1047,8 @@ request_started_state_tc_event(struct isci_request *ireq,
resp_iu = &ireq->ssp.rsp;
datapres = resp_iu->datapres;
- if (datapres == 1 || datapres == 2) {
+ if (datapres == SAS_DATAPRES_RESPONSE_DATA ||
+ datapres == SAS_DATAPRES_SENSE_DATA) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
@@ -1730,8 +1731,8 @@ sci_io_request_frame_handler(struct isci_request *ireq,
resp_iu = &ireq->ssp.rsp;
- if (resp_iu->datapres == 0x01 ||
- resp_iu->datapres == 0x02) {
+ if (resp_iu->datapres == SAS_DATAPRES_RESPONSE_DATA ||
+ resp_iu->datapres == SAS_DATAPRES_SENSE_DATA) {
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
} else {
@@ -2181,7 +2182,7 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
u16 len = sci_req_tx_bytes(ireq);
- /* likely non-error data underrrun, workaround missing
+ /* likely non-error data underrun, workaround missing
* d2h frame from the controller
*/
if (d2h->fis_type != FIS_REGD2H) {
@@ -2934,8 +2935,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
/* Normal notification (task_done) */
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
}
spin_unlock_irqrestore(&task->task_state_lock, task_flags);
@@ -3406,9 +3406,9 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t
return ireq;
}
-static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
- struct sas_task *task,
- u16 tag)
+struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag)
{
struct isci_request *ireq;
@@ -3434,16 +3434,12 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
}
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
- struct sas_task *task, u16 tag)
+ struct sas_task *task, struct isci_request *ireq)
{
enum sci_status status;
- struct isci_request *ireq;
unsigned long flags;
int ret = 0;
- /* do common allocation and init of request object. */
- ireq = isci_io_request_from_tag(ihost, task, tag);
-
status = isci_io_request_build(ihost, ireq, idev);
if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
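
The SAS_DATAPRES_* constants name the DATAPRES field of the SSP response IU, replacing the bare 1 and 2 compared above. A sketch of the values this code relies on (the kernel's definitions live in include/scsi/sas.h):

    /* SSP response IU DATAPRES field */
    enum {
            SAS_DATAPRES_NO_DATA            = 0,
            SAS_DATAPRES_RESPONSE_DATA      = 1,
            SAS_DATAPRES_SENSE_DATA         = 2,
    };
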
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index aff95317fcf4..20b141739e4d 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -291,7 +291,10 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
struct isci_tmf *isci_tmf,
u16 tag);
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
- struct sas_task *task, u16 tag);
+ struct sas_task *task, struct isci_request *ireq);
+struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag);
enum sci_status
sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 3fd88d72a0c0..c514b20293b2 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -91,8 +91,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
/* Normal notification (task_done) */
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->lldd_task = NULL;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -162,18 +161,17 @@ int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
SAS_TASK_UNDELIVERED,
SAS_SAM_STAT_TASK_ABORTED);
} else {
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ struct isci_request *ireq;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_io_request_from_tag(ihost, task, tag);
spin_unlock_irqrestore(&task->task_state_lock, flags);
/* build and send the request. */
- status = isci_request_execute(ihost, idev, task, tag);
+ /* do common allocation and init of request object. */
+ status = isci_request_execute(ihost, idev, task, ireq);
if (status != SCI_SUCCESS) {
- spin_lock_irqsave(&task->task_state_lock, flags);
- /* Did not really start this command. */
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
if (test_bit(IDEV_GONE, &idev->flags)) {
/* Indicate that the device
* is gone.
@@ -498,7 +496,6 @@ int isci_task_abort_task(struct sas_task *task)
/* If task is already done, the request isn't valid */
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
- (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
old_request) {
idev = isci_get_device(task->dev->lldd_dev);
target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
@@ -532,8 +529,7 @@ int isci_task_abort_task(struct sas_task *task)
*/
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
spin_unlock_irqrestore(&task->task_state_lock, flags);
ret = TMF_RESP_FUNC_COMPLETE;
@@ -581,8 +577,7 @@ int isci_task_abort_task(struct sas_task *task)
test_bit(IDEV_GONE, &idev->flags));
spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -626,24 +621,6 @@ int isci_task_abort_task_set(
/**
- * isci_task_clear_aca() - This function is one of the SAS Domain Template
- * functions. This is one of the Task Management functoins called by libsas.
- * @d_device: This parameter specifies the domain device associated with this
- * request.
- * @lun: This parameter specifies the lun associated with this request.
- *
- * status, zero indicates success.
- */
-int isci_task_clear_aca(
- struct domain_device *d_device,
- u8 *lun)
-{
- return TMF_RESP_FUNC_FAILED;
-}
-
-
-
-/**
* isci_task_clear_task_set() - This function is one of the SAS Domain Template
* functions. This is one of the Task Management functions called by libsas.
* @d_device: This parameter specifies the domain device associated with this
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index cae168b8916f..f96633fa6939 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -140,10 +140,6 @@ int isci_task_abort_task_set(
struct domain_device *d_device,
u8 *lun);
-int isci_task_clear_aca(
- struct domain_device *d_device,
- u8 *lun);
-
int isci_task_clear_task_set(
struct domain_device *d_device,
u8 *lun);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 1bc37593c88f..5fb1f364e815 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -52,6 +52,10 @@ static struct iscsi_transport iscsi_sw_tcp_transport;
static unsigned int iscsi_max_lun = ~0;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+static bool iscsi_recv_from_iscsi_q;
+module_param_named(recv_from_iscsi_q, iscsi_recv_from_iscsi_q, bool, 0644);
+MODULE_PARM_DESC(recv_from_iscsi_q, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false, which will perform reads from the network softirq context.");
+
static int iscsi_sw_tcp_dbg;
module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
S_IRUGO | S_IWUSR);
@@ -122,20 +126,13 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
return 0;
}
-static void iscsi_sw_tcp_data_ready(struct sock *sk)
+static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
{
- struct iscsi_conn *conn;
- struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
read_descriptor_t rd_desc;
- read_lock_bh(&sk->sk_callback_lock);
- conn = sk->sk_user_data;
- if (!conn) {
- read_unlock_bh(&sk->sk_callback_lock);
- return;
- }
- tcp_conn = conn->dd_data;
-
/*
* Use rd_desc to pass 'conn' to iscsi_tcp_recv.
* We set count to 1 because we want the network layer to
@@ -144,13 +141,48 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
*/
rd_desc.arg.data = conn;
rd_desc.count = 1;
- tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
- iscsi_sw_sk_state_check(sk);
+ tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
/* If we had to (atomically) map a highmem page,
* unmap it now. */
iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+
+ iscsi_sw_sk_state_check(sk);
+}
+
+static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
+{
+ struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
+ recvwork);
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sock *sk = tcp_sw_conn->sock->sk;
+
+ lock_sock(sk);
+ iscsi_sw_tcp_recv_data(conn);
+ release_sock(sk);
+}
+
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
+{
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_conn *conn;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ if (tcp_sw_conn->queue_recv)
+ iscsi_conn_queue_recv(conn);
+ else
+ iscsi_sw_tcp_recv_data(conn);
read_unlock_bh(&sk->sk_callback_lock);
}
@@ -205,7 +237,7 @@ static void iscsi_sw_tcp_write_space(struct sock *sk)
old_write_space(sk);
ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
static void iscsi_sw_tcp_conn_set_callbacks(struct iscsi_conn *conn)
@@ -274,7 +306,10 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
copy = segment->size - offset;
if (segment->total_copied + segment->size < segment->total_size)
- flags |= MSG_MORE;
+ flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
+
+ if (tcp_sw_conn->queue_recv)
+ flags |= MSG_DONTWAIT;
/* Use sendpage if we can; else fall back to sendmsg */
if (!segment->data) {
@@ -557,6 +592,10 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
conn = cls_conn->dd_data;
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
+ INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
+ tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
+
+ mutex_init(&tcp_sw_conn->sock_lock);
tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
@@ -592,11 +631,15 @@ free_conn:
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
- struct iscsi_session *session = conn->session;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct socket *sock = tcp_sw_conn->sock;
+ /*
+ * The iscsi transport class will make sure we are not called in
+ * parallel with start, stop, bind and destroys. However, this can be
+ * called twice if userspace does a stop then a destroy.
+ */
if (!sock)
return;
@@ -610,9 +653,11 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
iscsi_sw_tcp_conn_restore_callbacks(conn);
sock_put(sock->sk);
- spin_lock_bh(&session->frwd_lock);
+ iscsi_suspend_rx(conn);
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
tcp_sw_conn->sock = NULL;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
sockfd_put(sock);
}
@@ -664,7 +709,6 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -684,10 +728,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
if (err)
goto free_socket;
- spin_lock_bh(&session->frwd_lock);
+ mutex_lock(&tcp_sw_conn->sock_lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
- spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -724,8 +768,15 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ if (!tcp_sw_conn->sock) {
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ return -ENOTCONN;
+ }
tcp_sw_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
+ mutex_unlock(&tcp_sw_conn->sock_lock);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -740,8 +791,8 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct iscsi_tcp_conn *tcp_conn;
struct sockaddr_in6 addr;
struct socket *sock;
int rc;
@@ -751,21 +802,36 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_LOCAL_PORT:
spin_lock_bh(&conn->session->frwd_lock);
- if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ if (!conn->session->leadconn) {
spin_unlock_bh(&conn->session->frwd_lock);
return -ENOTCONN;
}
- sock = tcp_sw_conn->sock;
- sock_hold(sock->sk);
+ /*
+ * The conn has been set up and bound, so just grab a ref
+ * in case a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&conn->session->frwd_lock);
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock) {
+ rc = -ENOTCONN;
+ goto sock_unlock;
+ }
+
if (param == ISCSI_PARAM_LOCAL_PORT)
rc = kernel_getsockname(sock,
(struct sockaddr *)&addr);
else
rc = kernel_getpeername(sock,
(struct sockaddr *)&addr);
- sock_put(sock->sk);
+sock_unlock:
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
@@ -803,17 +869,21 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
}
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
- sock = tcp_sw_conn->sock;
- if (!sock) {
- spin_unlock_bh(&session->frwd_lock);
- return -ENOTCONN;
- }
- sock_hold(sock->sk);
+ /*
+ * The conn has been set up and bound, so just grab a ref
+ * in case a destroy runs while we are in the net layer.
+ */
+ iscsi_get_conn(conn->cls_conn);
spin_unlock_bh(&session->frwd_lock);
- rc = kernel_getsockname(sock,
- (struct sockaddr *)&addr);
- sock_put(sock->sk);
+ mutex_lock(&tcp_sw_conn->sock_lock);
+ sock = tcp_sw_conn->sock;
+ if (!sock)
+ rc = -ENOTCONN;
+ else
+ rc = kernel_getsockname(sock, (struct sockaddr *)&addr);
+ mutex_unlock(&tcp_sw_conn->sock_lock);
+ iscsi_put_conn(conn->cls_conn);
if (rc < 0)
return rc;
@@ -898,7 +968,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
remove_session:
iscsi_session_teardown(cls_session);
remove_host:
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
@@ -915,7 +985,7 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_tcp_r2tpool_free(cls_session->dd_data);
iscsi_session_teardown(cls_session);
- iscsi_host_remove(shost);
+ iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
@@ -1003,10 +1073,10 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
.eh_target_reset_handler = iscsi_eh_recover_target,
.dma_boundary = PAGE_SIZE - 1,
.slave_configure = iscsi_sw_tcp_slave_configure,
- .target_alloc = iscsi_target_alloc,
.proc_name = "iscsi_tcp",
.this_id = -1,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport iscsi_sw_tcp_transport = {
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 791453195099..68e14a344904 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,11 @@ struct iscsi_sw_tcp_send {
struct iscsi_sw_tcp_conn {
struct socket *sock;
+ /* Taken when accessing the sock from the netlink/sysfs interface */
+ struct mutex sock_lock;
+
+ struct work_struct recvwork;
+ bool queue_recv;
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
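
Putting the iscsi_tcp pieces together: with recv_from_iscsi_q set, ->sk_data_ready no longer drains the socket in softirq context but queues conn->recvwork, and the worker reads under lock_sock(), where blocking is allowed. A condensed sketch with hypothetical helper names:

    static void my_data_ready(struct sock *sk)              /* softirq */
    {
            struct iscsi_conn *conn = sk->sk_user_data;

            if (conn_queues_recv(conn))
                    iscsi_conn_queue_recv(conn);    /* defer to iscsi_q */
            else
                    do_recv(conn);                  /* legacy inline read */
    }

    static void my_recv_work(struct work_struct *work)      /* process ctx */
    {
            struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
                                                   recvwork);
            struct sock *sk = conn_to_sock(conn);   /* hypothetical accessor */

            lock_sock(sk);          /* may sleep; impossible in softirq */
            do_recv(conn);          /* tcp_read_sock() plus digest work */
            release_sock(sk);
    }
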
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
index 74ae7fd15d8d..7dcac3b6baa7 100644
--- a/drivers/scsi/libfc/fc_encode.h
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -246,7 +246,7 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
&entry->type);
put_unaligned_be16(len, &entry->len);
put_unaligned_be64(lport->wwnn,
- (__be64 *)&entry->value[0]);
+ (__be64 *)&entry->value);
/* Manufacturer */
entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 841000445b9a..1d91c457527f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -825,10 +825,9 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
}
memset(ep, 0, sizeof(*ep));
- cpu = get_cpu();
+ cpu = raw_smp_processor_id();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
- put_cpu();
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
@@ -1701,6 +1700,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
if (cancel_delayed_work_sync(&ep->timeout_work)) {
FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
fc_exch_release(ep); /* release from pending timer hold */
+ return;
}
spin_lock_bh(&ep->ex_lock);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 871b11edb586..945adca5e72f 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -45,14 +45,10 @@ static struct kmem_cache *scsi_pkt_cachep;
#define FC_SRB_READ (1 << 1)
#define FC_SRB_WRITE (1 << 0)
-/*
- * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
- */
-#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
-#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
-#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
-#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
+static struct libfc_cmd_priv *libfc_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
/**
* struct fc_fcp_internal - FCP layer internal data
@@ -147,8 +143,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
} else {
- per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
- put_cpu();
+ this_cpu_inc(lport->stats->FcpPktAllocFails);
}
return fsp;
}
@@ -270,8 +265,7 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
if (!fsp->seq_ptr)
return -EINVAL;
- per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
- put_cpu();
+ this_cpu_inc(fsp->lp->stats->FcpPktAborts);
fsp->state |= FC_SRB_ABORT_PENDING;
rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
@@ -440,8 +434,7 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
if (likely(fp))
return fp;
- per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
- put_cpu();
+ this_cpu_inc(lport->stats->FcpFrameAllocFails);
/* error case */
fc_fcp_can_queue_ramp_down(lport);
shost_printk(KERN_ERR, lport->host,
@@ -475,7 +468,6 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
struct scsi_cmnd *sc = fsp->cmd;
struct fc_lport *lport = fsp->lp;
- struct fc_stats *stats;
struct fc_frame_header *fh;
size_t start_offset;
size_t offset;
@@ -537,14 +529,12 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->ErrorFrames++;
+ this_cpu_inc(lport->stats->ErrorFrames);
/* per cpu count, not total count, but OK for limit */
- if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
+ if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT)
printk(KERN_WARNING "libfc: CRC error on data "
"frame for port (%6.6x)\n",
lport->port_id);
- put_cpu();
/*
* Assume the frame is total garbage.
* We may have copied it over the good part
@@ -1137,7 +1127,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
unsigned long flags;
int rc;
- fsp->cmd->SCp.ptr = (char *)fsp;
+ libfc_priv(fsp->cmd)->fsp = fsp;
fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
@@ -1150,7 +1140,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
if (unlikely(rc)) {
spin_lock_irqsave(&si->scsi_queue_lock, flags);
- fsp->cmd->SCp.ptr = NULL;
+ libfc_priv(fsp->cmd)->fsp = NULL;
list_del(&fsp->list);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
@@ -1865,7 +1855,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
struct fc_fcp_pkt *fsp;
int rval;
int rc = 0;
- struct fc_stats *stats;
rval = fc_remote_port_chkready(rport);
if (rval) {
@@ -1917,20 +1906,18 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
/*
* setup the data direction
*/
- stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
- stats->InputRequests++;
- stats->InputBytes += fsp->data_len;
+ this_cpu_inc(lport->stats->InputRequests);
+ this_cpu_add(lport->stats->InputBytes, fsp->data_len);
} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
fsp->req_flags = FC_SRB_WRITE;
- stats->OutputRequests++;
- stats->OutputBytes += fsp->data_len;
+ this_cpu_inc(lport->stats->OutputRequests);
+ this_cpu_add(lport->stats->OutputBytes, fsp->data_len);
} else {
fsp->req_flags = 0;
- stats->ControlRequests++;
+ this_cpu_inc(lport->stats->ControlRequests);
}
- put_cpu();
/*
* send it to the lower layer
@@ -1983,7 +1970,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
fc_fcp_can_queue_ramp_up(lport);
sc_cmd = fsp->cmd;
- CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
+ libfc_priv(sc_cmd)->status = fsp->cdb_status;
switch (fsp->status_code) {
case FC_COMPLETE:
if (fsp->cdb_status == 0) {
@@ -1992,7 +1979,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
*/
sc_cmd->result = DID_OK << 16;
if (fsp->scsi_resid)
- CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
} else {
/*
* transport level I/O was ok but scsi
@@ -2025,7 +2012,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
*/
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to FC_DATA_UNDRUN (scsi)\n");
- CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
+ libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
}
break;
@@ -2085,7 +2072,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
- sc_cmd->SCp.ptr = NULL;
+ libfc_priv(sc_cmd)->fsp = NULL;
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
scsi_done(sc_cmd);
@@ -2121,7 +2108,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd)
si = fc_get_scsi_internal(lport);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
- fsp = CMD_SP(sc_cmd);
+ fsp = libfc_priv(sc_cmd)->fsp;
if (!fsp) {
/* command completed while scsi eh was setting up */
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
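
The libfc statistics conversion trades the get_cpu()/put_cpu() bracket for this_cpu_*() operations, which make the whole update preempt-safe on their own. Before and after, sketched for one counter in a hypothetical note_input() helper:

    static void note_input(struct fc_lport *lport, u32 len)
    {
            /* Old style pinned the CPU across the dereference:
             *         stats = per_cpu_ptr(lport->stats, get_cpu());
             *         stats->InputRequests++;
             *         put_cpu();
             * New style is a single preempt-safe RMW per counter: */
            this_cpu_inc(lport->stats->InputRequests);
            this_cpu_add(lport->stats->InputBytes, len);
    }
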
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 19cd4a95d354..9c02c9523c4d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -308,21 +308,21 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
stats = per_cpu_ptr(lport->stats, cpu);
- fc_stats->tx_frames += stats->TxFrames;
- fc_stats->tx_words += stats->TxWords;
- fc_stats->rx_frames += stats->RxFrames;
- fc_stats->rx_words += stats->RxWords;
- fc_stats->error_frames += stats->ErrorFrames;
- fc_stats->invalid_crc_count += stats->InvalidCRCCount;
- fc_stats->fcp_input_requests += stats->InputRequests;
- fc_stats->fcp_output_requests += stats->OutputRequests;
- fc_stats->fcp_control_requests += stats->ControlRequests;
- fcp_in_bytes += stats->InputBytes;
- fcp_out_bytes += stats->OutputBytes;
- fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
- fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
- fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
- fc_stats->link_failure_count += stats->LinkFailureCount;
+ fc_stats->tx_frames += READ_ONCE(stats->TxFrames);
+ fc_stats->tx_words += READ_ONCE(stats->TxWords);
+ fc_stats->rx_frames += READ_ONCE(stats->RxFrames);
+ fc_stats->rx_words += READ_ONCE(stats->RxWords);
+ fc_stats->error_frames += READ_ONCE(stats->ErrorFrames);
+ fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount);
+ fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests);
+ fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests);
+ fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests);
+ fcp_in_bytes += READ_ONCE(stats->InputBytes);
+ fcp_out_bytes += READ_ONCE(stats->OutputBytes);
+ fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails);
+ fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts);
+ fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails);
+ fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount);
}
fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
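Taken together, the two hunks above move libfc from get_cpu()/put_cpu() bracketing around its counters to preemption-safe this_cpu ops on the writer side and READ_ONCE() sums on the reader side. A condensed sketch of that scheme, with illustrative names:

struct ex_stats {
	u64 reqs;
	u64 bytes;
};

/* writer: this_cpu ops are safe against preemption on their own,
 * so no get_cpu()/put_cpu() bracketing is needed */
static void ex_account(struct ex_stats __percpu *stats, u32 len)
{
	this_cpu_inc(stats->reqs);
	this_cpu_add(stats->bytes, len);
}

/* reader: sum every CPU's slot; READ_ONCE() keeps the compiler
 * from tearing or refetching loads that race with writers */
static u64 ex_total_bytes(struct ex_stats __percpu *stats)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += READ_ONCE(per_cpu_ptr(stats, cpu)->bytes);
	return sum;
}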
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 059dae8909ee..d95f4bcdeb2e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -83,7 +83,9 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
"%s " dbg_fmt, __func__, ##arg); \
} while (0);
-inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
+#define ISCSI_CMD_COMPL_WAIT 5
+
+inline void iscsi_conn_queue_xmit(struct iscsi_conn *conn)
{
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
@@ -91,7 +93,17 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
if (ihost->workq)
queue_work(ihost->workq, &conn->xmitwork);
}
-EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_xmit);
+
+inline void iscsi_conn_queue_recv(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ if (ihost->workq && !test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))
+ queue_work(ihost->workq, &conn->recvwork);
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_queue_recv);
static void __iscsi_update_cmdsn(struct iscsi_session *session,
uint32_t exp_cmdsn, uint32_t max_cmdsn)
@@ -462,7 +474,7 @@ static void iscsi_free_task(struct iscsi_task *task)
if (sc) {
/* SCSI eh reuses commands to verify us */
- sc->SCp.ptr = NULL;
+ iscsi_cmd(sc)->task = NULL;
/*
* queue command may call this to free the task, so
* it will decide how to return sc to scsi-ml.
@@ -472,12 +484,18 @@ static void iscsi_free_task(struct iscsi_task *task)
}
}
-void __iscsi_get_task(struct iscsi_task *task)
+bool iscsi_get_task(struct iscsi_task *task)
{
- refcount_inc(&task->refcount);
+ return refcount_inc_not_zero(&task->refcount);
}
-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+EXPORT_SYMBOL_GPL(iscsi_get_task);
+/**
+ * __iscsi_put_task - drop the refcount on a task
+ * @task: iscsi_task to drop the refcount on
+ *
+ * The session back_lock must be held when calling this, in case it frees the task.
+ */
void __iscsi_put_task(struct iscsi_task *task)
{
if (refcount_dec_and_test(&task->refcount))
@@ -489,10 +507,11 @@ void iscsi_put_task(struct iscsi_task *task)
{
struct iscsi_session *session = task->conn->session;
- /* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ if (refcount_dec_and_test(&task->refcount)) {
+ spin_lock_bh(&session->back_lock);
+ iscsi_free_task(task);
+ spin_unlock_bh(&session->back_lock);
+ }
}
EXPORT_SYMBOL_GPL(iscsi_put_task);
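The switch from refcount_inc() to refcount_inc_not_zero() changes the lookup contract: once the last put has run, a get fails instead of resurrecting the object. A small sketch of the resulting get/put pair, using hypothetical names:

struct ex_obj {
	refcount_t ref;
};

/* false means the free path already won the race; the caller must
 * back off rather than touch the object */
static bool ex_get(struct ex_obj *obj)
{
	return refcount_inc_not_zero(&obj->ref);
}

static void ex_free(struct ex_obj *obj);	/* defined elsewhere */

/* only the final put takes the lock that serializes against
 * lookups, mirroring the new iscsi_put_task() shape above */
static void ex_put(struct ex_obj *obj, spinlock_t *lookup_lock)
{
	if (refcount_dec_and_test(&obj->ref)) {
		spin_lock_bh(lookup_lock);
		ex_free(obj);
		spin_unlock_bh(lookup_lock);
	}
}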
@@ -557,16 +576,19 @@ static bool cleanup_queued_task(struct iscsi_task *task)
struct iscsi_conn *conn = task->conn;
bool early_complete = false;
- /* Bad target might have completed task while it was still running */
+ /*
+ * We might have raced where we handled an R2T early and got a response
+ * but have not yet taken the task off the requeue list, then a TMF or
+ * recovery happened and so we can still see it here.
+ */
if (task->state == ISCSI_TASK_COMPLETED)
early_complete = true;
if (!list_empty(&task->running)) {
list_del_init(&task->running);
/*
- * If it's on a list but still running, this could be from
- * a bad target sending a rsp early, cleanup from a TMF, or
- * session recovery.
+ * If it's on a list but still running, this could be cleanup
+ * from a TMF or session recovery.
*/
if (task->state == ISCSI_TASK_RUNNING ||
task->state == ISCSI_TASK_COMPLETED)
@@ -587,20 +609,17 @@ static bool cleanup_queued_task(struct iscsi_task *task)
}
/*
- * session frwd lock must be held and if not called for a task that is still
- * pending or from the xmit thread, then xmit thread must be suspended
+ * The session back and frwd locks must be held, and if this is not called
+ * for a task that is still pending or from the xmit thread, then the xmit
+ * thread must be suspended
*/
-static void fail_scsi_task(struct iscsi_task *task, int err)
+static void __fail_scsi_task(struct iscsi_task *task, int err)
{
struct iscsi_conn *conn = task->conn;
struct scsi_cmnd *sc;
int state;
- spin_lock_bh(&conn->session->back_lock);
- if (cleanup_queued_task(task)) {
- spin_unlock_bh(&conn->session->back_lock);
+ if (cleanup_queued_task(task))
return;
- }
if (task->state == ISCSI_TASK_PENDING) {
/*
@@ -619,7 +638,15 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
sc->result = err << 16;
scsi_set_resid(sc, scsi_bufflen(sc));
iscsi_complete_task(task, state);
- spin_unlock_bh(&conn->session->back_lock);
+}
+
+static void fail_scsi_task(struct iscsi_task *task, int err)
+{
+ struct iscsi_session *session = task->conn->session;
+
+ spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, err);
+ spin_unlock_bh(&session->back_lock);
}
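This is the usual locked/unlocked split: the double-underscore variant assumes the caller already holds back_lock, so loops such as fail_scsi_tasks() further below can fail many tasks under a single lock acquisition. Sketched with hypothetical names:

/* caller holds the lookup lock */
static void __ex_fail(struct ex_obj *obj, int err);

static void ex_fail(struct ex_obj *obj, int err, spinlock_t *lock)
{
	spin_lock_bh(lock);
	__ex_fail(obj, err);
	spin_unlock_bh(lock);
}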
static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -668,17 +695,24 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
return 0;
}
+/**
+ * iscsi_alloc_mgmt_task - allocate and set up a mgmt task.
+ * @conn: iscsi conn that the task will be sent on.
+ * @hdr: iscsi pdu that will be sent.
+ * @data: buffer for data segment if needed.
+ * @data_size: length of data in bytes.
+ */
static struct iscsi_task *
-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+iscsi_alloc_mgmt_task(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size)
{
struct iscsi_session *session = conn->session;
- struct iscsi_host *ihost = shost_priv(session->host);
uint8_t opcode = hdr->opcode & ISCSI_OPCODE_MASK;
struct iscsi_task *task;
itt_t itt;
- if (session->state == ISCSI_STATE_TERMINATE)
+ if (session->state == ISCSI_STATE_TERMINATE ||
+ !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
return NULL;
if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -753,28 +787,57 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
task->conn->session->age);
}
- if (unlikely(READ_ONCE(conn->ping_task) == INVALID_SCSI_TASK))
- WRITE_ONCE(conn->ping_task, task);
+ return task;
+
+free_task:
+ iscsi_put_task(task);
+ return NULL;
+}
+
+/**
+ * iscsi_send_mgmt_task - Send a task created with iscsi_alloc_mgmt_task().
+ * @task: iscsi task to send.
+ *
+ * On failure, this returns a non-zero error code, and the driver must free
+ * the task with iscsi_put_task().
+ */
+static int iscsi_send_mgmt_task(struct iscsi_task *task)
+{
+ struct iscsi_conn *conn = task->conn;
+ struct iscsi_session *session = conn->session;
+ struct iscsi_host *ihost = shost_priv(conn->session->host);
+ int rc = 0;
if (!ihost->workq) {
- if (iscsi_prep_mgmt_task(conn, task))
- goto free_task;
+ rc = iscsi_prep_mgmt_task(conn, task);
+ if (rc)
+ return rc;
- if (session->tt->xmit_task(task))
- goto free_task;
+ rc = session->tt->xmit_task(task);
+ if (rc)
+ return rc;
} else {
list_add_tail(&task->running, &conn->mgmtqueue);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
- return task;
+ return 0;
+}
-free_task:
- /* regular RX path uses back_lock */
- spin_lock(&session->back_lock);
- __iscsi_put_task(task);
- spin_unlock(&session->back_lock);
- return NULL;
+static int __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+{
+ struct iscsi_task *task;
+ int rc;
+
+ task = iscsi_alloc_mgmt_task(conn, hdr, data, data_size);
+ if (!task)
+ return -ENOMEM;
+
+ rc = iscsi_send_mgmt_task(task);
+ if (rc)
+ iscsi_put_task(task);
+ return rc;
}
int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -785,7 +848,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
int err = 0;
spin_lock_bh(&session->frwd_lock);
- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ if (__iscsi_conn_send_pdu(conn, hdr, data, data_size))
err = -EPERM;
spin_unlock_bh(&session->frwd_lock);
return err;
@@ -958,7 +1021,6 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
if (!rhdr) {
if (READ_ONCE(conn->ping_task))
return -EINVAL;
- WRITE_ONCE(conn->ping_task, INVALID_SCSI_TASK);
}
memset(&hdr, 0, sizeof(struct iscsi_nopout));
@@ -972,10 +1034,18 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
} else
hdr.ttt = RESERVED_ITT;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
- if (!task) {
+ task = iscsi_alloc_mgmt_task(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+ if (!task)
+ return -ENOMEM;
+
+ if (!rhdr)
+ WRITE_ONCE(conn->ping_task, task);
+
+ if (iscsi_send_mgmt_task(task)) {
if (!rhdr)
WRITE_ONCE(conn->ping_task, NULL);
+ iscsi_put_task(task);
+
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return -EIO;
} else if (!rhdr) {
@@ -1344,10 +1414,10 @@ struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
if (!task || !task->sc)
return NULL;
- if (task->sc->SCp.phase != conn->session->age) {
+ if (iscsi_cmd(task->sc)->age != conn->session->age) {
iscsi_session_printk(KERN_ERR, conn->session,
"task's session age %d, expected %d\n",
- task->sc->SCp.phase, conn->session->age);
+ iscsi_cmd(task->sc)->age, conn->session->age);
return NULL;
}
@@ -1392,8 +1462,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
return true;
}
@@ -1433,11 +1503,17 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
{
int rc;
- spin_lock_bh(&conn->session->back_lock);
-
if (!conn->task) {
- /* Take a ref so we can access it after xmit_task() */
- __iscsi_get_task(task);
+ /*
+ * Take a ref so we can access it after xmit_task().
+ *
+ * This should never fail because the failure paths will have
+ * stopped the xmit thread.
+ */
+ if (!iscsi_get_task(task)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
} else {
/* Already have a ref from when we failed to send it last call */
conn->task = NULL;
@@ -1448,22 +1524,20 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* case a bad target sends a cmd rsp before we have handled the task.
*/
if (was_requeue)
- __iscsi_put_task(task);
+ iscsi_put_task(task);
/*
* Do this after dropping the extra ref because if this was a requeue
* it's removed from that list and cleanup_queued_task would miss it.
*/
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
/*
* Save the task and ref in case we weren't cleaning up this
* task and get woken up again.
*/
conn->task = task;
- spin_unlock_bh(&conn->session->back_lock);
return -ENODATA;
}
- spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
@@ -1471,20 +1545,16 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
if (!rc) {
/* done with this task */
task->last_xfer = jiffies;
- }
- /* regular RX path uses back_lock */
- spin_lock(&conn->session->back_lock);
- if (rc && task->state == ISCSI_TASK_RUNNING) {
+ } else {
/*
* get an extra ref that is released next time we access it
* as conn->task above.
*/
- __iscsi_get_task(task);
+ iscsi_get_task(task);
conn->task = task;
}
- __iscsi_put_task(task);
- spin_unlock(&conn->session->back_lock);
+ iscsi_put_task(task);
return rc;
}
@@ -1512,7 +1582,7 @@ void iscsi_requeue_task(struct iscsi_task *task)
*/
iscsi_put_task(task);
}
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1532,7 +1602,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
int rc = 0;
spin_lock_bh(&conn->session->frwd_lock);
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1566,6 +1636,28 @@ check_mgmt:
goto done;
}
+check_requeue:
+ while (!list_empty(&conn->requeue)) {
+ /*
+ * we always do fastlogout - conn stop code will clean up.
+ */
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+ task = list_entry(conn->requeue.next, struct iscsi_task,
+ running);
+
+ if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
+ break;
+
+ list_del_init(&task->running);
+ rc = iscsi_xmit_task(conn, task, true);
+ if (rc)
+ goto done;
+ if (!list_empty(&conn->mgmtqueue))
+ goto check_mgmt;
+ }
+
/* process pending command queue */
while (!list_empty(&conn->cmdqueue)) {
task = list_entry(conn->cmdqueue.next, struct iscsi_task,
@@ -1593,28 +1685,10 @@ check_mgmt:
*/
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
+ if (!list_empty(&conn->requeue))
+ goto check_requeue;
}
- while (!list_empty(&conn->requeue)) {
- /*
- * we always do fastlogout - conn stop code will clean up.
- */
- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
- break;
-
- task = list_entry(conn->requeue.next, struct iscsi_task,
- running);
-
- if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
- break;
-
- list_del_init(&task->running);
- rc = iscsi_xmit_task(conn, task, true);
- if (rc)
- goto done;
- if (!list_empty(&conn->mgmtqueue))
- goto check_mgmt;
- }
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
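Moving the requeue scan ahead of the command queue changes the TX thread's service order; a hedged restatement of the new priority:

/*
 * iscsi_data_xmit() now drains, in order:
 *   1. mgmtqueue  - mgmt PDUs always go first
 *   2. requeue    - data-outs answering R2Ts, ahead of new commands
 *   3. cmdqueue   - new SCSI commands
 * and jumps back whenever a higher-priority queue refills.
 */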
@@ -1645,8 +1719,8 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,
(void *) &task, sizeof(void *)))
return NULL;
- sc->SCp.phase = conn->session->age;
- sc->SCp.ptr = (char *) task;
+ iscsi_cmd(sc)->age = conn->session->age;
+ iscsi_cmd(sc)->task = task;
refcount_set(&task->refcount, 1);
task->state = ISCSI_TASK_PENDING;
@@ -1683,7 +1757,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
struct iscsi_task *task = NULL;
sc->result = 0;
- sc->SCp.ptr = NULL;
+ iscsi_cmd(sc)->task = NULL;
ihost = shost_priv(host);
@@ -1746,7 +1820,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto fault;
}
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_REQUEUE << 16;
goto fault;
@@ -1781,7 +1855,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
}
} else {
list_add_tail(&task->running, &conn->cmdqueue);
- iscsi_conn_queue_work(conn);
+ iscsi_conn_queue_xmit(conn);
}
session->queued_cmdsn++;
@@ -1842,11 +1916,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
__must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
- struct iscsi_task *task;
- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
- NULL, 0);
- if (!task) {
+ if (__iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0)) {
spin_unlock_bh(&session->frwd_lock);
iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
@@ -1894,6 +1965,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
struct iscsi_task *task;
int i;
+restart_cmd_loop:
spin_lock_bh(&session->back_lock);
for (i = 0; i < session->cmds_max; i++) {
task = session->cmds[i];
@@ -1902,22 +1974,25 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
if (lun != -1 && lun != task->sc->device->lun)
continue;
-
- __iscsi_get_task(task);
- spin_unlock_bh(&session->back_lock);
+ /*
+ * The cmd is completing, but if this is called from an eh
+ * callout path, then when we return, scsi-ml owns the cmd. Wait
+ * for the completion path to finish freeing the cmd.
+ */
+ if (!iscsi_get_task(task)) {
+ spin_unlock_bh(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ spin_lock_bh(&session->frwd_lock);
+ goto restart_cmd_loop;
+ }
ISCSI_DBG_SESSION(session,
"failing sc %p itt 0x%x state %d\n",
task->sc, task->itt, task->state);
- fail_scsi_task(task, error);
-
- spin_unlock_bh(&session->frwd_lock);
- iscsi_put_task(task);
- spin_lock_bh(&session->frwd_lock);
-
- spin_lock_bh(&session->back_lock);
+ __fail_scsi_task(task, error);
+ __iscsi_put_task(task);
}
-
spin_unlock_bh(&session->back_lock);
}
@@ -1935,14 +2010,14 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->session->frwd_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
/**
* iscsi_suspend_tx - suspend iscsi_data_xmit
- * @conn: iscsi conn tp stop processing IO on.
+ * @conn: iscsi conn to stop processing IO on.
*
* This function sets the suspend bit to prevent iscsi_data_xmit
* from sending new IO, and if work is queued on the xmit thread
@@ -1953,18 +2028,33 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
- flush_workqueue(ihost->workq);
+ flush_work(&conn->xmitwork);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- iscsi_conn_queue_work(conn);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ iscsi_conn_queue_xmit(conn);
}
+/**
+ * iscsi_suspend_rx - Prevent recvwork from running again.
+ * @conn: iscsi conn to stop.
+ */
+void iscsi_suspend_rx(struct iscsi_conn *conn)
+{
+ struct Scsi_Host *shost = conn->session->host;
+ struct iscsi_host *ihost = shost_priv(shost);
+
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ if (ihost->workq)
+ flush_work(&conn->recvwork);
+}
+EXPORT_SYMBOL_GPL(iscsi_suspend_rx);
+
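Two details in iscsi_suspend_tx()/iscsi_suspend_rx() above generalize: set the suspend flag before flushing so nothing requeues behind the flush, and flush only this connection's work item instead of the whole shared workqueue. A sketch (EX_SUSPENDED is a made-up flag bit):

static void ex_suspend_work(struct workqueue_struct *wq,
			    unsigned long *flags,
			    struct work_struct *work)
{
	/* new queue attempts must observe the flag first */
	set_bit(0 /* EX_SUSPENDED, hypothetical */, flags);
	if (wq)
		flush_work(work);	/* not flush_workqueue(): other
					 * conns share the queue */
}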
/*
* We want to make sure a ping is in flight. It has timed out.
* And we are not busy processing a pdu that is making
@@ -1997,7 +2087,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
spin_lock_bh(&session->frwd_lock);
spin_lock(&session->back_lock);
- task = (struct iscsi_task *)sc->SCp.ptr;
+ task = iscsi_cmd(sc)->task;
if (!task) {
/*
* Raced with completion. Blk layer has taken ownership
@@ -2007,7 +2097,16 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
spin_unlock(&session->back_lock);
goto done;
}
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ /*
+ * Racing with the completion path right now, so give it more
+ * time so that the path can complete it as normal.
+ */
+ rc = BLK_EH_RESET_TIMER;
+ task = NULL;
+ spin_unlock(&session->back_lock);
+ goto done;
+ }
spin_unlock(&session->back_lock);
if (session->state != ISCSI_STATE_LOGGED_IN) {
@@ -2214,6 +2313,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->frwd_lock);
+ clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
if (!is_active) {
/*
* if logout timed out before userspace could even send a PDU
@@ -2254,13 +2355,14 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
ISCSI_DBG_EH(session, "aborting sc %p\n", sc);
+completion_check:
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
/*
* if session was ISCSI_STATE_IN_RECOVERY then we may not have
* got the command.
*/
- if (!sc->SCp.ptr) {
+ if (!iscsi_cmd(sc)->task) {
ISCSI_DBG_EH(session, "sc never reached iscsi layer or "
"it completed.\n");
spin_unlock_bh(&session->frwd_lock);
@@ -2273,7 +2375,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
* then let the host reset code handle this
*/
if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
- sc->SCp.phase != session->age) {
+ iscsi_cmd(sc)->age != session->age) {
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
ISCSI_DBG_EH(session, "failing abort due to dropped "
@@ -2282,7 +2384,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
}
spin_lock(&session->back_lock);
- task = (struct iscsi_task *)sc->SCp.ptr;
+ task = iscsi_cmd(sc)->task;
if (!task || !task->sc) {
/* task completed before time out */
ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
@@ -2293,13 +2395,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
return SUCCESS;
}
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
+ /* We are just about to call iscsi_free_task(), so wait for it. */
+ udelay(ISCSI_CMD_COMPL_WAIT);
+ goto completion_check;
+ }
+
+ ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
conn = session->leadconn;
iscsi_get_conn(conn->cls_conn);
conn->eh_abort_cnt++;
age = session->age;
-
- ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
- __iscsi_get_task(task);
spin_unlock(&session->back_lock);
if (task->state == ISCSI_TASK_PENDING) {
@@ -2798,11 +2907,9 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
ihost = shost_priv(shost);
if (xmit_can_sleep) {
- snprintf(ihost->workq_name, sizeof(ihost->workq_name),
- "iscsi_q_%d", shost->host_no);
- ihost->workq = alloc_workqueue("%s",
+ ihost->workq = alloc_workqueue("iscsi_q_%d",
WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, ihost->workq_name);
+ 1, shost->host_no);
if (!ihost->workq)
goto free_host;
}
@@ -2827,11 +2934,12 @@ static void iscsi_notify_host_removed(struct iscsi_cls_session *cls_session)
/**
* iscsi_host_remove - remove host and sessions
* @shost: scsi host
+ * @is_shutdown: true if called from a driver shutdown callout
*
* If there are any sessions left, this will initiate the removal and wait
* for the completion.
*/
-void iscsi_host_remove(struct Scsi_Host *shost)
+void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown)
{
struct iscsi_host *ihost = shost_priv(shost);
unsigned long flags;
@@ -2840,7 +2948,11 @@ void iscsi_host_remove(struct Scsi_Host *shost)
ihost->state = ISCSI_HOST_REMOVED;
spin_unlock_irqrestore(&ihost->lock, flags);
- iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ if (!is_shutdown)
+ iscsi_host_for_each_session(shost, iscsi_notify_host_removed);
+ else
+ iscsi_host_for_each_session(shost, iscsi_force_destroy_session);
+
wait_event_interruptible(ihost->session_removal_wq,
ihost->num_sessions == 0);
if (signal_pending(current))
@@ -3040,13 +3152,13 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
struct iscsi_conn *conn;
struct iscsi_cls_conn *cls_conn;
char *data;
+ int err;
- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+ cls_conn = iscsi_alloc_conn(cls_session, sizeof(*conn) + dd_size,
conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
- memset(conn, 0, sizeof(*conn) + dd_size);
conn->dd_data = cls_conn->dd_data + sizeof(*conn);
conn->session = session;
@@ -3078,13 +3190,21 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
goto login_task_data_alloc_fail;
conn->login_task->data = conn->data = data;
+ err = iscsi_add_conn(cls_conn);
+ if (err)
+ goto login_task_add_dev_fail;
+
return cls_conn;
+login_task_add_dev_fail:
+ free_pages((unsigned long) conn->data,
+ get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
+
login_task_data_alloc_fail:
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
login_task_alloc_fail:
- iscsi_destroy_conn(cls_conn);
+ iscsi_put_conn(cls_conn);
return NULL;
}
EXPORT_SYMBOL_GPL(iscsi_conn_setup);
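The iscsi_alloc_conn()/iscsi_add_conn()/iscsi_put_conn() trio follows the device_initialize()/device_add()/put_device() lifetime rule: after a successful alloc, every exit path must drop a reference rather than free directly. A condensed sketch of the unwind, with placeholder helpers:

static struct ex_conn *ex_conn_setup(void)
{
	struct ex_conn *conn;

	conn = ex_alloc_conn();		/* refcounted from here on */
	if (!conn)
		return NULL;

	if (ex_conn_init_resources(conn))
		goto err_put;

	if (ex_add_conn(conn))		/* registers the object */
		goto err_free_resources;

	return conn;

err_free_resources:
	ex_conn_free_resources(conn);
err_put:
	ex_put_conn(conn);		/* never kfree() directly */
	return NULL;
}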
@@ -3100,8 +3220,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- char *tmp_persistent_address = conn->persistent_address;
- char *tmp_local_ipaddr = conn->local_ipaddr;
+
+ iscsi_remove_conn(cls_conn);
del_timer_sync(&conn->transport_timer);
@@ -3123,6 +3243,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
spin_lock_bh(&session->frwd_lock);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
+ kfree(conn->persistent_address);
+ kfree(conn->local_ipaddr);
/* regular RX path uses back_lock */
spin_lock_bh(&session->back_lock);
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
@@ -3133,9 +3255,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
- iscsi_destroy_conn(cls_conn);
- kfree(tmp_persistent_address);
- kfree(tmp_local_ipaddr);
+ iscsi_put_conn(cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
@@ -3311,6 +3431,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
+
+ set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
spin_unlock_bh(&session->frwd_lock);
/*
@@ -3323,8 +3445,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
/*
* Unblock xmitworker(), Login Phase will pass through.
*/
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 2e9ffe3d1a55..c182aa83f2c9 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -558,7 +558,11 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
return 0;
}
task->last_xfer = jiffies;
- __iscsi_get_task(task);
+ if (!iscsi_get_task(task)) {
+ spin_unlock(&session->back_lock);
+ /* Let the path that got the early rsp complete it */
+ return 0;
+ }
tcp_conn = conn->dd_data;
rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
@@ -927,7 +931,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
*/
conn->last_recv = jiffies;
- if (unlikely(conn->suspend_rx)) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
ISCSI_DBG_TCP(conn, "Rx suspended!\n");
*status = ISCSI_TCP_SUSPENDED;
return 0;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index a315715b3622..d35c9296f738 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -52,8 +52,6 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_DATA_OVERRUN:
case SAS_QUEUE_FULL:
case SAS_DEVICE_UNKNOWN:
- case SAS_SG_ERR:
- return AC_ERR_INVALID;
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
pr_warn("%s: Saw error %d. What to do?\n",
@@ -181,14 +179,9 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->task_proto = SAS_PROTOCOL_STP;
task->task_done = sas_ata_task_done;
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ ||
- qc->tf.command == ATA_CMD_FPDMA_RECV ||
- qc->tf.command == ATA_CMD_FPDMA_SEND ||
- qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
- /* Need to zero out the tag libata assigned us */
+ /* For NCQ commands, zero out the tag libata assigned us */
+ if (ata_is_ncq(qc->tf.protocol))
qc->tf.nsect = 0;
- }
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
task->uldd_task = qc;
@@ -197,7 +190,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
task->data_dir = qc->dma_dir;
- } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+ } else if (!ata_is_data(qc->tf.protocol)) {
task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
@@ -209,7 +202,6 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
}
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
- task->task_state_flags = SAS_TASK_STATE_PENDING;
qc->lldd_task = task;
task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
@@ -366,22 +358,14 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev,
return r;
}
-static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline)
{
- int ret = 0, res;
- struct sas_phy *phy;
- struct ata_port *ap = link->ap;
+ struct sata_device *sata_dev = &dev->sata_dev;
int (*check_ready)(struct ata_link *link);
- struct domain_device *dev = ap->private_data;
- struct sas_internal *i = dev_to_sas_internal(dev);
-
- res = i->dft->lldd_I_T_nexus_reset(dev);
- if (res == -ENODEV)
- return res;
-
- if (res != TMF_RESP_FUNC_COMPLETE)
- sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+ struct ata_port *ap = sata_dev->ap;
+ struct ata_link *link = &ap->link;
+ struct sas_phy *phy;
+ int ret;
phy = sas_get_local_phy(dev);
if (scsi_is_sas_phy_local(phy))
@@ -394,6 +378,27 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
if (ret && ret != -EAGAIN)
sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sas_ata_wait_after_reset);
+
+static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct domain_device *dev = ap->private_data;
+ struct sas_internal *i = dev_to_sas_internal(dev);
+ int ret;
+
+ ret = i->dft->lldd_I_T_nexus_reset(dev);
+ if (ret == -ENODEV)
+ return ret;
+
+ if (ret != TMF_RESP_FUNC_COMPLETE)
+ sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
+
+ ret = sas_ata_wait_after_reset(dev, deadline);
+
*class = dev->sata_dev.class;
ap->cbl = ATA_CBL_SATA;
@@ -783,8 +788,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
sas_enable_revalidation(sas_ha);
}
-void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
- struct list_head *done_q)
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
struct domain_device *eh_dev;
@@ -856,3 +860,11 @@ void sas_ata_wait_eh(struct domain_device *dev)
ap = dev->sata_dev.ap;
ata_port_wait_eh(ap);
}
+
+int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
+{
+ struct sas_tmf_task tmf_task = {};
+ return sas_execute_tmf(device, fis, sizeof(struct host_to_dev_fis),
+ force_phy_id, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_execute_ata_cmd);
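A hypothetical LLDD caller sketch for the new helper: build a host-to-device FIS and issue it as an internal command; a negative force_phy_id means no phy is forced:

static int ex_send_fis(struct domain_device *dev)
{
	struct host_to_dev_fis fis = {};

	fis.fis_type = 0x27;	/* host-to-device register FIS */
	/* ... fill in command, device, LBA fields ... */

	return sas_execute_ata_cmd(dev, (u8 *)&fis, -1);
}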
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 12e1e36d7c04..d5bc1314c341 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -8,7 +8,6 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/async.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"
@@ -546,19 +545,17 @@ static void sas_chain_event(int event, unsigned long *pending,
}
}
-int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
+void sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
struct sas_discovery *disc;
if (!port)
- return 0;
+ return;
disc = &port->disc;
BUG_ON(ev >= DISC_NUM_EVENTS);
sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
-
- return 0;
}
/**
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index f703115e7a25..f3a17191a4fe 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -10,29 +10,26 @@
#include <scsi/scsi_host.h>
#include "sas_internal.h"
-int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- /* it's added to the defer_q when draining so return succeed */
- int rc = 1;
-
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
- return 0;
+ return false;
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
- } else
- rc = queue_work(ha->event_q, &sw->work);
+ return true;
+ }
- return rc;
+ return queue_work(ha->event_q, &sw->work);
}
-static int sas_queue_event(int event, struct sas_work *work,
+static bool sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
unsigned long flags;
- int rc;
+ bool rc;
spin_lock_irqsave(&ha->lock, flags);
rc = sas_queue_work(ha, work);
@@ -41,12 +38,24 @@ static int sas_queue_event(int event, struct sas_work *work,
return rc;
}
-
-void __sas_drain_work(struct sas_ha_struct *ha)
+void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
struct sas_work *sw, *_sw;
- int ret;
+ spin_lock_irq(&ha->lock);
+ list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
+ list_del_init(&sw->drain_node);
+
+ if (!sas_queue_work(ha, sw)) {
+ pm_runtime_put(ha->dev);
+ sas_free_event(to_asd_sas_event(&sw->work));
+ }
+ }
+ spin_unlock_irq(&ha->lock);
+}
+
+void __sas_drain_work(struct sas_ha_struct *ha)
+{
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
@@ -55,16 +64,8 @@ void __sas_drain_work(struct sas_ha_struct *ha)
drain_workqueue(ha->event_q);
drain_workqueue(ha->disco_q);
- spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
- list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
- list_del_init(&sw->drain_node);
- ret = sas_queue_work(ha, sw);
- if (ret != 1)
- sas_free_event(to_asd_sas_event(&sw->work));
-
- }
- spin_unlock_irq(&ha->lock);
+ sas_queue_deferred_work(ha);
}
int sas_drain_work(struct sas_ha_struct *ha)
@@ -104,11 +105,15 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
if (!test_and_clear_bit(ev, &d->pending))
continue;
- if (list_empty(&port->phy_list))
+ spin_lock(&port->phy_list_lock);
+ if (list_empty(&port->phy_list)) {
+ spin_unlock(&port->phy_list_lock);
continue;
+ }
sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
port_phy_el);
+ spin_unlock(&port->phy_list_lock);
sas_notify_port_event(sas_phy,
PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
@@ -119,61 +124,93 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
static void sas_port_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *ha = phy->ha;
sas_port_event_fns[ev->event](work);
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
}
static void sas_phy_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *ha = phy->ha;
sas_phy_event_fns[ev->event](work);
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
}
-int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
- gfp_t gfp_flags)
+/* defer works of new phys during suspend */
+static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
+{
+ struct sas_ha_struct *ha = phy->ha;
+ unsigned long flags;
+ bool deferred = false;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
+ struct sas_work *sw = &ev->work;
+
+ list_add_tail(&sw->drain_node, &ha->defer_q);
+ deferred = true;
+ }
+ spin_unlock_irqrestore(&ha->lock, flags);
+ return deferred;
+}
+
+void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
+ gfp_t gfp_flags)
{
struct sas_ha_struct *ha = phy->ha;
struct asd_sas_event *ev;
- int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
- return -ENOMEM;
+ return;
+
+ /* Paired with the pm_runtime_put() in sas_port_event_worker() */
+ pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
- ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1)
- sas_free_event(ev);
+ if (sas_defer_event(phy, ev))
+ return;
- return ret;
+ if (!sas_queue_event(event, &ev->work, ha)) {
+ pm_runtime_put(ha->dev);
+ sas_free_event(ev);
+ }
}
EXPORT_SYMBOL_GPL(sas_notify_port_event);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
- gfp_t gfp_flags)
+void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t gfp_flags)
{
struct sas_ha_struct *ha = phy->ha;
struct asd_sas_event *ev;
- int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
- return -ENOMEM;
+ return;
+
+ /* Paired with the pm_runtime_put() in sas_phy_event_worker() */
+ pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
- ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1)
- sas_free_event(ev);
+ if (sas_defer_event(phy, ev))
+ return;
- return ret;
+ if (!sas_queue_event(event, &ev->work, ha)) {
+ pm_runtime_put(ha->dev);
+ sas_free_event(ev);
+ }
}
EXPORT_SYMBOL_GPL(sas_notify_phy_event);
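The runtime-PM changes above follow one rule: take a no-resume reference when an event is queued, and drop it exactly once, either in the worker after the handler runs or on any path where the work will never run. A minimal sketch (ex_dev_from_work() is a hypothetical lookup):

static void ex_queue_event(struct device *dev, struct workqueue_struct *wq,
			   struct work_struct *work)
{
	/* paired with the pm_runtime_put() in the worker */
	pm_runtime_get_noresume(dev);

	if (!queue_work(wq, work))
		pm_runtime_put(dev);	/* never queued: drop it here */
}

static void ex_event_worker(struct work_struct *work)
{
	/* ... handle the event ... */
	pm_runtime_put(ex_dev_from_work(work));
}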
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index c2150a818423..5ce251830104 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -28,26 +28,6 @@ static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
/* ---------- SMP task management ---------- */
-static void smp_task_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-}
-
-static void smp_task_done(struct sas_task *task)
-{
- del_timer(&task->slow_task->timer);
- complete(&task->slow_task->completion);
-}
-
/* Give it some long enough timeout. In seconds. */
#define SMP_TIMEOUT 10
@@ -58,7 +38,9 @@ static int smp_execute_task_sg(struct domain_device *dev,
struct sas_task *task = NULL;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
+ struct sas_ha_struct *ha = dev->port->ha;
+ pm_runtime_get_sync(ha->dev);
mutex_lock(&dev->ex_dev.cmd_mutex);
for (retry = 0; retry < 3; retry++) {
if (test_bit(SAS_DEV_GONE, &dev->state)) {
@@ -76,16 +58,16 @@ static int smp_execute_task_sg(struct domain_device *dev,
task->smp_task.smp_req = *req;
task->smp_task.smp_resp = *resp;
- task->task_done = smp_task_done;
+ task->task_done = sas_task_internal_done;
- task->slow_task->timer.function = smp_task_timedout;
+ task->slow_task->timer.function = sas_task_internal_timedout;
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
- del_timer(&task->slow_task->timer);
+ del_timer_sync(&task->slow_task->timer);
pr_notice("executing SMP task failed:%d\n", res);
break;
}
@@ -131,6 +113,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
}
}
mutex_unlock(&dev->ex_dev.cmd_mutex);
+ pm_runtime_put_sync(ha->dev);
BUG_ON(retry == 3 && task != NULL);
sas_free_task(task);
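The del_timer() to del_timer_sync() change matters because the timer was armed before lldd_execute_task() ran: on failure the handler may be mid-flight on another CPU, and only the _sync variant waits it out before the task is freed. A sketch (ex_dispatch() is a hypothetical LLDD call):

static int ex_issue(struct sas_task *task)
{
	int res;

	add_timer(&task->slow_task->timer);

	res = ex_dispatch(task);
	if (res) {
		/* the handler may already be running on another CPU;
		 * only the _sync variant waits it out before the free */
		del_timer_sync(&task->slow_task->timer);
		sas_free_task(task);
	}
	return res;
}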
@@ -192,13 +175,13 @@ static enum sas_device_type to_dev_type(struct discover_resp *dr)
return dr->attached_dev_type;
}
-static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
+ struct smp_disc_resp *disc_resp)
{
enum sas_device_type dev_type;
enum sas_linkrate linkrate;
u8 sas_addr[SAS_ADDR_SIZE];
- struct smp_resp *resp = rsp;
- struct discover_resp *dr = &resp->disc;
+ struct discover_resp *dr = &disc_resp->disc;
struct sas_ha_struct *ha = dev->port->ha;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
@@ -215,7 +198,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
BUG_ON(!phy->phy);
}
- switch (resp->result) {
+ switch (disc_resp->result) {
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
break;
@@ -364,12 +347,13 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
}
#define DISCOVER_REQ_SIZE 16
-#define DISCOVER_RESP_SIZE 56
+#define DISCOVER_RESP_SIZE sizeof(struct smp_disc_resp)
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
- u8 *disc_resp, int single)
+ struct smp_disc_resp *disc_resp,
+ int single)
{
- struct discover_resp *dr;
+ struct discover_resp *dr = &disc_resp->disc;
int res;
disc_req[9] = single;
@@ -378,7 +362,6 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
- dr = &((struct smp_resp *)disc_resp)->disc;
if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
pr_notice("Found loopback topology, just ignore it!\n");
return 0;
@@ -392,7 +375,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
struct expander_device *ex = &dev->ex_dev;
int res = 0;
u8 *disc_req;
- u8 *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
if (!disc_req)
@@ -446,27 +429,14 @@ static int sas_expander_discover(struct domain_device *dev)
#define MAX_EXPANDER_PHYS 128
-static void ex_assign_report_general(struct domain_device *dev,
- struct smp_resp *resp)
-{
- struct report_general_resp *rg = &resp->rg;
-
- dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
- dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
- dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
- dev->ex_dev.t2t_supp = rg->t2t_supp;
- dev->ex_dev.conf_route_table = rg->conf_route_table;
- dev->ex_dev.configuring = rg->configuring;
- memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
-}
-
#define RG_REQ_SIZE 8
-#define RG_RESP_SIZE 32
+#define RG_RESP_SIZE sizeof(struct smp_rg_resp)
static int sas_ex_general(struct domain_device *dev)
{
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
+ struct report_general_resp *rg;
int res;
int i;
@@ -497,7 +467,15 @@ static int sas_ex_general(struct domain_device *dev)
goto out;
}
- ex_assign_report_general(dev, rg_resp);
+ rg = &rg_resp->rg;
+ dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
+ dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
+ dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
+ dev->ex_dev.t2t_supp = rg->t2t_supp;
+ dev->ex_dev.conf_route_table = rg->conf_route_table;
+ dev->ex_dev.configuring = rg->configuring;
+ memcpy(dev->ex_dev.enclosure_logical_id,
+ rg->enclosure_logical_id, 8);
if (dev->ex_dev.configuring) {
pr_debug("RG: ex %016llx self-configuring...\n",
@@ -698,10 +676,10 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
#ifdef CONFIG_SCSI_SAS_ATA
#define RPS_REQ_SIZE 16
-#define RPS_RESP_SIZE 60
+#define RPS_RESP_SIZE sizeof(struct smp_rps_resp)
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp)
+ struct smp_rps_resp *rps_resp)
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -1674,7 +1652,7 @@ out_err:
/* ---------- Domain revalidation ---------- */
static int sas_get_phy_discover(struct domain_device *dev,
- int phy_id, struct smp_resp *disc_resp)
+ int phy_id, struct smp_disc_resp *disc_resp)
{
int res;
u8 *disc_req;
@@ -1690,10 +1668,8 @@ static int sas_get_phy_discover(struct domain_device *dev,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
goto out;
- else if (disc_resp->result != SMP_RESP_FUNC_ACC) {
+ if (disc_resp->result != SMP_RESP_FUNC_ACC)
res = disc_resp->result;
- goto out;
- }
out:
kfree(disc_req);
return res;
@@ -1703,7 +1679,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
int phy_id, int *pcc)
{
int res;
- struct smp_resp *disc_resp;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
@@ -1721,19 +1697,17 @@ static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type)
{
int res;
- struct smp_resp *disc_resp;
- struct discover_resp *dr;
+ struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
return -ENOMEM;
- dr = &disc_resp->disc;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (res == 0) {
memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
SAS_ADDR_SIZE);
- *type = to_dev_type(dr);
+ *type = to_dev_type(&disc_resp->disc);
if (*type == 0)
memset(sas_addr, 0, SAS_ADDR_SIZE);
}
@@ -1777,7 +1751,7 @@ static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
{
int res;
u8 *rg_req;
- struct smp_resp *rg_resp;
+ struct smp_rg_resp *rg_resp;
rg_req = alloc_smp_req(RG_REQ_SIZE);
if (!rg_req)
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index b640e09af6a4..e4f77072a58d 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -362,6 +362,7 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
int i;
set_bit(SAS_HA_REGISTERED, &ha->state);
+ set_bit(SAS_HA_RESUMING, &ha->state);
/* clear out any stale link events/data from the suspension path */
for (i = 0; i < ha->num_phys; i++) {
@@ -387,7 +388,31 @@ static int phys_suspended(struct sas_ha_struct *ha)
return rc;
}
-void sas_resume_ha(struct sas_ha_struct *ha)
+static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+ struct domain_device *dev = port->port_dev;
+
+ if (dev && dev_is_expander(dev->dev_type)) {
+ struct asd_sas_phy *first_phy;
+
+ spin_lock(&port->phy_list_lock);
+ first_phy = list_first_entry_or_null(
+ &port->phy_list, struct asd_sas_phy,
+ port_phy_el);
+ spin_unlock(&port->phy_list_lock);
+
+ if (first_phy)
+ sas_notify_port_event(first_phy,
+ PORTE_BROADCAST_RCVD, GFP_KERNEL);
+ }
+ }
+}
+
+static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
{
const unsigned long tmo = msecs_to_jiffies(25000);
int i;
@@ -417,10 +442,30 @@ void sas_resume_ha(struct sas_ha_struct *ha)
* flush out disks that did not return
*/
scsi_unblock_requests(ha->core.shost);
- sas_drain_work(ha);
+ if (drain)
+ sas_drain_work(ha);
+ clear_bit(SAS_HA_RESUMING, &ha->state);
+
+ sas_queue_deferred_work(ha);
+ /* Send PORTE_BROADCAST_RCVD events to identify any newly inserted
+ * disks behind expanders.
+ */
+ sas_resume_insert_broadcast_ha(ha);
+}
+
+void sas_resume_ha(struct sas_ha_struct *ha)
+{
+ _sas_resume_ha(ha, true);
}
EXPORT_SYMBOL(sas_resume_ha);
+/* A no-sync variant, which does not call sas_drain_work(). */
+void sas_resume_ha_no_sync(struct sas_ha_struct *ha)
+{
+ _sas_resume_ha(ha, false);
+}
+EXPORT_SYMBOL(sas_resume_ha_no_sync);
+
void sas_suspend_ha(struct sas_ha_struct *ha)
{
int i;
@@ -486,6 +531,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->reset_result = 0;
@@ -499,6 +545,7 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
if (rc == 0)
rc = d->reset_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
@@ -513,6 +560,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (!d)
return -ENOMEM;
+ pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->enable_result = 0;
@@ -526,6 +574,7 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
if (rc == 0)
rc = d->enable_result;
mutex_unlock(&d->event_lock);
+ pm_runtime_put_sync(ha->dev);
return rc;
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index d7a1fb5c10c6..8d0ad3abc7b5 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -14,6 +14,7 @@
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>
#include <scsi/sas_ata.h>
+#include <linux/pm_runtime.h>
#ifdef pr_fmt
#undef pr_fmt
@@ -56,6 +57,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
void sas_disable_revalidation(struct sas_ha_struct *ha);
void sas_enable_revalidation(struct sas_ha_struct *ha);
+void sas_queue_deferred_work(struct sas_ha_struct *ha);
void __sas_drain_work(struct sas_ha_struct *ha);
void sas_deform_port(struct asd_sas_phy *phy, int gone);
@@ -65,7 +67,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
-int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
+bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -76,14 +78,12 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
- gfp_t flags);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
int sas_ex_phy_discover(struct domain_device *dev, int single);
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
- struct smp_resp *rps_resp);
+ struct smp_rps_resp *rps_resp);
int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
@@ -93,6 +93,12 @@ void sas_destruct_devices(struct asd_sas_port *port);
extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
+void sas_task_internal_done(struct sas_task *task);
+void sas_task_internal_timedout(struct timer_list *t);
+int sas_execute_tmf(struct domain_device *device, void *parameter,
+ int para_len, int force_phy_id,
+ struct sas_tmf_task *tmf);
+
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
#else
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 67b429dcf1ff..11599c0e3fc3 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -67,6 +67,34 @@ static void sas_resume_port(struct asd_sas_phy *phy)
sas_discover_event(port, DISCE_RESUME);
}
+static void sas_form_port_add_phy(struct asd_sas_port *port,
+ struct asd_sas_phy *phy, bool wideport)
+{
+ list_add_tail(&phy->port_phy_el, &port->phy_list);
+ sas_phy_set_target(phy, port->port_dev);
+ phy->port = port;
+ port->num_phys++;
+ port->phy_mask |= (1U << phy->id);
+
+ if (wideport)
+ pr_debug("phy%d matched wide port%d\n", phy->id,
+ port->id);
+ else
+ memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
+
+ if (*(u64 *)port->attached_sas_addr == 0) {
+ port->class = phy->class;
+ memcpy(port->attached_sas_addr, phy->attached_sas_addr,
+ SAS_ADDR_SIZE);
+ port->iproto = phy->iproto;
+ port->tproto = phy->tproto;
+ port->oob_mode = phy->oob_mode;
+ port->linkrate = phy->linkrate;
+ } else {
+ port->linkrate = max(port->linkrate, phy->linkrate);
+ }
+}
+
/**
* sas_form_port - add this phy to a port
* @phy: the phy of interest
@@ -79,7 +107,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
int i;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
- struct domain_device *port_dev;
+ struct domain_device *port_dev = NULL;
struct sas_internal *si =
to_sas_internal(sas_ha->core.shost->transportt);
unsigned long flags;
@@ -110,8 +138,9 @@ static void sas_form_port(struct asd_sas_phy *phy)
if (*(u64 *) port->sas_addr &&
phy_is_wideport_member(port, phy) && port->num_phys > 0) {
/* wide port */
- pr_debug("phy%d matched wide port%d\n", phy->id,
- port->id);
+ port_dev = port->port_dev;
+ sas_form_port_add_phy(port, phy, true);
+ spin_unlock(&port->phy_list_lock);
break;
}
spin_unlock(&port->phy_list_lock);
@@ -122,40 +151,22 @@ static void sas_form_port(struct asd_sas_phy *phy)
port = sas_ha->sas_port[i];
spin_lock(&port->phy_list_lock);
if (*(u64 *)port->sas_addr == 0
- && port->num_phys == 0) {
- memcpy(port->sas_addr, phy->sas_addr,
- SAS_ADDR_SIZE);
+ && port->num_phys == 0) {
+ port_dev = port->port_dev;
+ sas_form_port_add_phy(port, phy, false);
+ spin_unlock(&port->phy_list_lock);
break;
}
spin_unlock(&port->phy_list_lock);
}
- }
- if (i >= sas_ha->num_phys) {
- pr_err("%s: couldn't find a free port, bug?\n", __func__);
- spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
- return;
+ if (i >= sas_ha->num_phys) {
+ pr_err("%s: couldn't find a free port, bug?\n",
+ __func__);
+ spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
+ return;
+ }
}
-
- /* add the phy to the port */
- port_dev = port->port_dev;
- list_add_tail(&phy->port_phy_el, &port->phy_list);
- sas_phy_set_target(phy, port_dev);
- phy->port = port;
- port->num_phys++;
- port->phy_mask |= (1U << phy->id);
-
- if (*(u64 *)port->attached_sas_addr == 0) {
- port->class = phy->class;
- memcpy(port->attached_sas_addr, phy->attached_sas_addr,
- SAS_ADDR_SIZE);
- port->iproto = phy->iproto;
- port->tproto = phy->tproto;
- port->oob_mode = phy->oob_mode;
- port->linkrate = phy->linkrate;
- } else
- port->linkrate = max(port->linkrate, phy->linkrate);
- spin_unlock(&port->phy_list_lock);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) {
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index d337fdf1b9ca..a36fa1c128a8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -37,7 +37,8 @@
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
- int hs = 0, stat = 0;
+ enum scsi_host_status hs = DID_OK;
+ enum exec_status stat = SAS_SAM_STAT_GOOD;
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
@@ -66,9 +67,6 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
case SAS_DEVICE_UNKNOWN:
hs = DID_BAD_TARGET;
break;
- case SAS_SG_ERR:
- hs = DID_PARITY;
- break;
case SAS_OPEN_REJECT:
if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
hs = DID_SOFT_ERROR; /* retry */
@@ -82,10 +80,10 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
case SAS_ABORTED_TASK:
hs = DID_ABORT;
break;
- case SAM_STAT_CHECK_CONDITION:
+ case SAS_SAM_STAT_CHECK_CONDITION:
memcpy(sc->sense_buffer, ts->buf,
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
- stat = SAM_STAT_CHECK_CONDITION;
+ stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
stat = ts->stat;
@@ -315,11 +313,13 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
pr_notice("%s: task 0x%p failed to abort\n",
__func__, task);
return TASK_ABORT_FAILED;
+ default:
+ pr_notice("%s: task 0x%p result code %d not handled\n",
+ __func__, task, res);
}
-
}
}
- return res;
+ return TASK_ABORT_FAILED;
}
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
@@ -756,7 +756,7 @@ retry:
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
- sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
+ sas_ata_eh(shost, &eh_work_q);
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
@@ -872,7 +872,8 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
- return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
+ return ata_change_queue_depth(dev->sata_dev.ap,
+ sas_to_ata_dev(dev), sdev, depth);
if (!sdev->tagged_supported)
depth = 1;
@@ -893,6 +894,315 @@ int sas_bios_param(struct scsi_device *scsi_dev,
}
EXPORT_SYMBOL_GPL(sas_bios_param);
+void sas_task_internal_done(struct sas_task *task)
+{
+ del_timer(&task->slow_task->timer);
+ complete(&task->slow_task->completion);
+}
+
+void sas_task_internal_timedout(struct timer_list *t)
+{
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
+ bool is_completed = true;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ is_completed = false;
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ if (!is_completed)
+ complete(&task->slow_task->completion);
+}
+
+#define TASK_TIMEOUT (20 * HZ)
+#define TASK_RETRY 3
+
+static int sas_execute_internal_abort(struct domain_device *device,
+ enum sas_internal_abort type, u16 tag,
+ unsigned int qid, void *data)
+{
+ struct sas_ha_struct *ha = device->port->ha;
+ struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
+ struct sas_task *task = NULL;
+ int res, retry;
+
+ for (retry = 0; retry < TASK_RETRY; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = device;
+ task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
+ task->task_done = sas_task_internal_done;
+ task->slow_task->timer.function = sas_task_internal_timedout;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
+ add_timer(&task->slow_task->timer);
+
+ task->abort_task.tag = tag;
+ task->abort_task.type = type;
+ task->abort_task.qid = qid;
+
+ res = i->dft->lldd_execute_task(task, GFP_KERNEL);
+ if (res) {
+ del_timer_sync(&task->slow_task->timer);
+ pr_err("Executing internal abort failed %016llx (%d)\n",
+ SAS_ADDR(device->sas_addr), res);
+ break;
+ }
+
+ wait_for_completion(&task->slow_task->completion);
+ res = TMF_RESP_FUNC_FAILED;
+
+ /* Even if the internal abort timed out, return directly. */
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ bool quit = true;
+
+ if (i->dft->lldd_abort_timeout)
+ quit = i->dft->lldd_abort_timeout(task, data);
+ else
+ pr_err("Internal abort: timeout %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EIO;
+ if (quit)
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_SAM_STAT_GOOD) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+ res = TMF_RESP_FUNC_SUCC;
+ break;
+ }
+
+ pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
+ SAS_ADDR(device->sas_addr), task->task_status.resp,
+ task->task_status.stat);
+ sas_free_task(task);
+ task = NULL;
+ }
+ BUG_ON(retry == TASK_RETRY && task != NULL);
+ sas_free_task(task);
+ return res;
+}
+
+int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
+ unsigned int qid, void *data)
+{
+ return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
+ tag, qid, data);
+}
+EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
+
+int sas_execute_internal_abort_dev(struct domain_device *device,
+ unsigned int qid, void *data)
+{
+ return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
+ SCSI_NO_TAG, qid, data);
+}
+EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
+
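The two wrappers are how an LLDD is expected to drive the helper: abort one tagged command on a queue, or everything outstanding for a device. A hedged usage sketch (my_abort() and lldd_priv are illustrative, not part of the API):

static int my_abort(struct domain_device *dev, u16 tag, unsigned int qid,
		    void *lldd_priv)
{
	int rc;

	/* Try to abort the one tagged command first ... */
	rc = sas_execute_internal_abort_single(dev, tag, qid, lldd_priv);
	if (rc == TMF_RESP_FUNC_COMPLETE)
		return rc;

	/* ... then escalate to everything outstanding on the device. */
	return sas_execute_internal_abort_dev(dev, qid, lldd_priv);
}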
+int sas_execute_tmf(struct domain_device *device, void *parameter,
+ int para_len, int force_phy_id,
+ struct sas_tmf_task *tmf)
+{
+ struct sas_task *task;
+ struct sas_internal *i =
+ to_sas_internal(device->port->ha->core.shost->transportt);
+ int res, retry;
+
+ for (retry = 0; retry < TASK_RETRY; retry++) {
+ task = sas_alloc_slow_task(GFP_KERNEL);
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = device;
+ task->task_proto = device->tproto;
+
+ if (dev_is_sata(device)) {
+ task->ata_task.device_control_reg_update = 1;
+ if (force_phy_id >= 0) {
+ task->ata_task.force_phy = true;
+ task->ata_task.force_phy_id = force_phy_id;
+ }
+ memcpy(&task->ata_task.fis, parameter, para_len);
+ } else {
+ memcpy(&task->ssp_task, parameter, para_len);
+ }
+
+ task->task_done = sas_task_internal_done;
+ task->tmf = tmf;
+
+ task->slow_task->timer.function = sas_task_internal_timedout;
+ task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
+ add_timer(&task->slow_task->timer);
+
+ res = i->dft->lldd_execute_task(task, GFP_KERNEL);
+ if (res) {
+ del_timer_sync(&task->slow_task->timer);
+ pr_err("executing TMF task failed %016llx (%d)\n",
+ SAS_ADDR(device->sas_addr), res);
+ break;
+ }
+
+ wait_for_completion(&task->slow_task->completion);
+
+ if (i->dft->lldd_tmf_exec_complete)
+ i->dft->lldd_tmf_exec_complete(device);
+
+ res = TMF_RESP_FUNC_FAILED;
+
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ pr_err("TMF task timeout for %016llx and not done\n",
+ SAS_ADDR(device->sas_addr));
+ if (i->dft->lldd_tmf_aborted)
+ i->dft->lldd_tmf_aborted(task);
+ break;
+ }
+ pr_warn("TMF task timeout for %016llx and done\n",
+ SAS_ADDR(device->sas_addr));
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
+ res = TMF_RESP_FUNC_COMPLETE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+ res = TMF_RESP_FUNC_SUCC;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun
+ */
+ pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
+ SAS_ADDR(device->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ res = task->task_status.residual;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ pr_warn("TMF task blocked task error %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EMSGSIZE;
+ break;
+ }
+
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_OPEN_REJECT) {
+ pr_warn("TMF task open reject failed %016llx\n",
+ SAS_ADDR(device->sas_addr));
+ res = -EIO;
+ } else {
+ pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
+ SAS_ADDR(device->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ }
+ sas_free_task(task);
+ task = NULL;
+ }
+
+ if (retry == TASK_RETRY)
+ pr_warn("executing TMF for %016llx failed after %d attempts!\n",
+ SAS_ADDR(device->sas_addr), TASK_RETRY);
+ sas_free_task(task);
+
+ return res;
+}
+
+static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
+ struct sas_tmf_task *tmf)
+{
+ struct sas_ssp_task ssp_task;
+
+ if (!(device->tproto & SAS_PROTOCOL_SSP))
+ return TMF_RESP_FUNC_ESUPP;
+
+ memcpy(ssp_task.LUN, lun, 8);
+
+ return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
+}
+
+int sas_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_ABORT_TASK_SET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_abort_task_set);
+
+int sas_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_CLEAR_TASK_SET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_clear_task_set);
+
+int sas_lu_reset(struct domain_device *dev, u8 *lun)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_LU_RESET,
+ };
+
+ return sas_execute_ssp_tmf(dev, lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_lu_reset);
+
+int sas_query_task(struct sas_task *task, u16 tag)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_QUERY_TASK,
+ .tag_of_task_to_be_managed = tag,
+ };
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct scsi_lun lun;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+
+ return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_query_task);
+
+int sas_abort_task(struct sas_task *task, u16 tag)
+{
+ struct sas_tmf_task tmf_task = {
+ .tmf = TMF_ABORT_TASK,
+ .tag_of_task_to_be_managed = tag,
+ };
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct domain_device *dev = task->dev;
+ struct scsi_lun lun;
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+
+ return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+}
+EXPORT_SYMBOL_GPL(sas_abort_task);
+
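A common error-handling sequence first queries whether the target still owns the task and only then aborts it. A sketch under the assumption that tag is the LLDD's hardware tag for the task (my_eh_abort() is a placeholder name):

static int my_eh_abort(struct sas_task *task, u16 tag)
{
	/* Ask the target whether it still owns the task before aborting. */
	switch (sas_query_task(task, tag)) {
	case TMF_RESP_FUNC_SUCC:	/* still in the task set: abort it */
		return sas_abort_task(task, tag);
	case TMF_RESP_FUNC_COMPLETE:	/* already completed at the target */
		return TMF_RESP_FUNC_COMPLETE;
	default:
		return TMF_RESP_FUNC_FAILED;
	}
}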
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
* This should only ever be called by an LLDD.
diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c
index 2966ead1d421..e9d291007817 100644
--- a/drivers/scsi/libsas/sas_task.c
+++ b/drivers/scsi/libsas/sas_task.c
@@ -15,11 +15,14 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
tstat->resp = SAS_TASK_COMPLETE;
- if (iu->datapres == 0)
+ switch (iu->datapres) {
+ case SAS_DATAPRES_NO_DATA:
tstat->stat = iu->status;
- else if (iu->datapres == 1)
+ break;
+ case SAS_DATAPRES_RESPONSE_DATA:
tstat->stat = iu->resp_data[3];
- else if (iu->datapres == 2) {
+ break;
+ case SAS_DATAPRES_SENSE_DATA:
tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
tstat->buf_valid_size =
min_t(int, SAS_STATUS_BUF_SIZE,
@@ -29,10 +32,11 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task,
if (iu->status != SAM_STAT_CHECK_CONDITION)
dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n",
SAS_ADDR(task->dev->sas_addr), iu->status);
- }
- else
+ break;
+ default:
/* when datapres contains corrupt/unknown value... */
tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
+ }
}
EXPORT_SYMBOL_GPL(sas_ssp_task_response);
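The three DATAPRES values the switch now names come straight from the SSP response IU layout in SAM/SPL: 0 means the status byte is authoritative, 1 means a response payload carries the TMF code in byte 3, 2 means sense data follows and implies CHECK CONDITION. A stand-alone decoder sketch (struct my_ssp_iu is a simplified stand-in for the real response IU):

struct my_ssp_iu {
	u8 status;       /* SCSI status byte, valid when datapres == 0 */
	u8 datapres;     /* which optional payload, if any, follows    */
	u8 resp_data[4]; /* TMF response code in byte 3 when datapres == 1 */
};

static u8 my_decode(const struct my_ssp_iu *iu)
{
	if (iu->datapres == 1)
		return iu->resp_data[3];   /* e.g. TMF_RESP_FUNC_COMPLETE */
	if (iu->datapres == 2)
		return SAM_STAT_CHECK_CONDITION;
	return iu->status;                 /* no data, or unknown value   */
}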
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 092a971d066b..bbd1faf41e80 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -33,4 +33,4 @@ obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
- lpfc_nvme.o lpfc_nvmet.o
+ lpfc_nvme.o lpfc_nvmet.o lpfc_vmid.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2f8e6d0a926f..9ad233b40a9e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -48,9 +48,6 @@ struct lpfc_sli2_slim;
the NameServer before giving up. */
#define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */
#define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */
-#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi
- cmnd for menlo needs nearly twice as for firmware
- downloads using bsg */
#define LPFC_DEFAULT_XPSGL_SIZE 256
#define LPFC_MAX_SG_TABLESIZE 0xffff
@@ -71,8 +68,6 @@ struct lpfc_sli2_slim;
#define LPFC_MIN_TGT_QDEPTH 10
#define LPFC_MAX_TGT_QDEPTH 0xFFFF
-#define LPFC_MAX_BUCKET_COUNT 20 /* Maximum no. of buckets for stat data
- collection. */
/*
* Following time intervals are used for adjusting SCSI device
* queue depths when there is a driver resource error or Firmware
@@ -408,6 +403,7 @@ struct lpfc_trunk_link {
link1,
link2,
link3;
+ u32 phy_lnk_speed;
};
/* Format of congestion module parameters */
@@ -496,52 +492,50 @@ struct lpfc_cgn_info {
__le32 cgn_alarm_hr[24];
__le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS];
- /* Start of congestion statistics */
- uint8_t cgn_stat_npm; /* Notifications per minute */
-
- /* Start Time */
- uint8_t cgn_stat_month;
- uint8_t cgn_stat_day;
- uint8_t cgn_stat_year;
- uint8_t cgn_stat_hour;
- uint8_t cgn_stat_minute;
- uint8_t cgn_pad2[2];
-
- __le32 cgn_notification;
- __le32 cgn_peer_notification;
- __le32 link_integ_notification;
- __le32 delivery_notification;
-
- uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
- uint8_t cgn_stat_cgn_day;
- uint8_t cgn_stat_cgn_year;
- uint8_t cgn_stat_cgn_hour;
- uint8_t cgn_stat_cgn_min;
- uint8_t cgn_stat_cgn_sec;
-
- uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
- uint8_t cgn_stat_peer_day;
- uint8_t cgn_stat_peer_year;
- uint8_t cgn_stat_peer_hour;
- uint8_t cgn_stat_peer_min;
- uint8_t cgn_stat_peer_sec;
-
- uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
- uint8_t cgn_stat_lnk_day;
- uint8_t cgn_stat_lnk_year;
- uint8_t cgn_stat_lnk_hour;
- uint8_t cgn_stat_lnk_min;
- uint8_t cgn_stat_lnk_sec;
-
- uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
- uint8_t cgn_stat_del_day;
- uint8_t cgn_stat_del_year;
- uint8_t cgn_stat_del_hour;
- uint8_t cgn_stat_del_min;
- uint8_t cgn_stat_del_sec;
-#define LPFC_CGN_STAT_SIZE 48
-#define LPFC_CGN_DATA_SIZE (sizeof(struct lpfc_cgn_info) - \
- LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
+ struct_group(cgn_stat,
+ uint8_t cgn_stat_npm; /* Notifications per minute */
+
+ /* Start Time */
+ uint8_t cgn_stat_month;
+ uint8_t cgn_stat_day;
+ uint8_t cgn_stat_year;
+ uint8_t cgn_stat_hour;
+ uint8_t cgn_stat_minute;
+ uint8_t cgn_pad2[2];
+
+ __le32 cgn_notification;
+ __le32 cgn_peer_notification;
+ __le32 link_integ_notification;
+ __le32 delivery_notification;
+
+ uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
+ uint8_t cgn_stat_cgn_day;
+ uint8_t cgn_stat_cgn_year;
+ uint8_t cgn_stat_cgn_hour;
+ uint8_t cgn_stat_cgn_min;
+ uint8_t cgn_stat_cgn_sec;
+
+ uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
+ uint8_t cgn_stat_peer_day;
+ uint8_t cgn_stat_peer_year;
+ uint8_t cgn_stat_peer_hour;
+ uint8_t cgn_stat_peer_min;
+ uint8_t cgn_stat_peer_sec;
+
+ uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
+ uint8_t cgn_stat_lnk_day;
+ uint8_t cgn_stat_lnk_year;
+ uint8_t cgn_stat_lnk_hour;
+ uint8_t cgn_stat_lnk_min;
+ uint8_t cgn_stat_lnk_sec;
+
+ uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
+ uint8_t cgn_stat_del_day;
+ uint8_t cgn_stat_del_year;
+ uint8_t cgn_stat_del_hour;
+ uint8_t cgn_stat_del_min;
+ uint8_t cgn_stat_del_sec;
+ );
__le32 cgn_info_crc;
#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41
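struct_group() (from <linux/stddef.h>) emits the members both inline and as a mirror struct named cgn_stat, so the driver can take offsetof()/sizeof_field() of the whole stats region instead of hand-maintaining LPFC_CGN_STAT_SIZE. A minimal sketch of what that buys, assuming the usual stddef.h semantics (my_reset_cgn_stats() is illustrative):

#include <linux/stddef.h>
#include <linux/string.h>

static void my_reset_cgn_stats(struct lpfc_cgn_info *cp)
{
	/* Zero only the congestion statistics, leaving the header and
	 * the trailing cgn_info_crc untouched.
	 */
	memset((u8 *)cp + offsetof(struct lpfc_cgn_info, cgn_stat), 0,
	       sizeof_field(struct lpfc_cgn_info, cgn_stat));
}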
@@ -594,6 +588,7 @@ struct lpfc_vport {
#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -612,6 +607,7 @@ struct lpfc_vport {
#define FC_CT_RSNN_NN 0x4 /* RSNN_NN accepted by switch */
#define FC_CT_RSPN_ID 0x8 /* RSPN_ID accepted by switch */
#define FC_CT_RFT_ID 0x10 /* RFT_ID accepted by switch */
+#define FC_CT_RPRT_DEFER 0x20 /* Defer issuing FDMI RPRT */
struct list_head fc_nodes;
@@ -669,8 +665,6 @@ struct lpfc_vport {
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
- int unreg_vpi_cmpl;
-
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
@@ -716,6 +710,7 @@ struct lpfc_vport {
#define LPFC_VMID_QFPA_CMPL 0x4
#define LPFC_VMID_QOS_ENABLED 0x8
#define LPFC_VMID_TIMER_ENBLD 0x10
+#define LPFC_VMID_TYPE_PRIO 0x20
struct fc_qfpa_res *qfpa_res;
struct fc_vport *fc_vport;
@@ -736,14 +731,11 @@ struct lpfc_vport {
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
#endif
- uint8_t stat_data_enabled;
- uint8_t stat_data_blocked;
struct list_head rcv_buffer_list;
unsigned long rcv_buffer_time_stamp;
uint32_t vport_flag;
-#define STATIC_VPORT 1
-#define FAWWPN_SET 2
-#define FAWWPN_PARAM_CHG 4
+#define STATIC_VPORT 0x1
+#define FAWWPN_PARAM_CHG 0x2
uint16_t fdmi_num_disc;
uint32_t fdmi_hba_mask;
@@ -900,6 +892,11 @@ enum lpfc_irq_chann_mode {
NHT_MODE,
};
+enum lpfc_hba_bit_flags {
+ FABRIC_COMANDS_BLOCKED,
+ HBA_PCI_ERR,
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -920,6 +917,10 @@ struct lpfc_hba {
(struct lpfc_vport *vport,
struct lpfc_io_buf *lpfc_cmd,
uint8_t tmo);
+ int (*lpfc_scsi_prep_task_mgmt_cmd)
+ (struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd);
/* IOCB interface function jump table entries */
int (*__lpfc_sli_issue_iocb)
@@ -931,8 +932,6 @@ struct lpfc_hba {
void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
struct lpfc_iocbq *);
int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
- IOCB_t * (*lpfc_get_iocb_from_iocbq)
- (struct lpfc_iocbq *);
void (*lpfc_scsi_cmd_iocb_cmpl)
(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
@@ -965,7 +964,24 @@ struct lpfc_hba {
int (*lpfc_bg_scsi_prep_dma_buf)
(struct lpfc_hba *, struct lpfc_io_buf *);
- /* Add new entries here */
+
+ /* Prep SLI WQE/IOCB jump table entries */
+ void (*__lpfc_sli_prep_els_req_rsp)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp,
+ u16 cmd_size, u32 did, u32 elscmd,
+ u8 tmo, u8 expect_rsp);
+ void (*__lpfc_sli_prep_gen_req)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi,
+ u32 num_entry, u8 tmo);
+ void (*__lpfc_sli_prep_xmit_seq64)(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi,
+ u16 ox_id, u32 num_entry, u8 rctl,
+ u8 last_seq, u8 cr_cx_cmd);
+ void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag,
+ u8 ulp_class, u16 cqid, bool ia,
+ bool wqec);
/* expedite pool */
struct lpfc_epd_pool epd_pool;
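The new __lpfc_sli_prep_* entries follow the driver's existing jump-table pattern: the SLI3 and SLI4 attach paths install their own implementations, and common code calls through a thin wrapper. A hedged sketch of the dispatch (the wrapper body is illustrative of the pattern, not quoted from the series):

void lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry,
			   u8 tmo)
{
	/* Dispatch through the per-SLI-rev slot filled in at setup time. */
	phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
}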
@@ -1005,6 +1021,7 @@ struct lpfc_hba {
#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
#define LS_CT_VEN_RPA 0x20 /* Vendor RPA sent to switch */
+#define LS_EXTERNAL_LOOPBACK 0x40 /* External loopback plug inserted */
uint32_t hba_flag; /* hba generic flags */
#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
@@ -1023,15 +1040,13 @@ struct lpfc_hba {
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
-#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
* capability
*/
-#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
-#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */
+#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */
@@ -1039,7 +1054,9 @@ struct lpfc_hba {
#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */
#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
+#define HBA_RHBA_CMPL 0x20000000 /* RHBA FDMI command is successful */
+ struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -1139,8 +1156,6 @@ struct lpfc_hba {
uint32_t cfg_nvme_seg_cnt;
uint32_t cfg_scsi_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
- uint64_t cfg_soft_wwnn;
- uint64_t cfg_soft_wwpn;
uint32_t cfg_hba_queue_depth;
uint32_t cfg_enable_hba_reset;
uint32_t cfg_enable_hba_heartbeat;
@@ -1165,6 +1180,16 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP 1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
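With the block above, both the default and the upper clamp of lpfc_enable_fc4_type now depend on whether the kernel was built with NVMe-over-FC support; the LPFC_ATTR_R(enable_fc4_type, ...) definition later in this series picks both up. Since LPFC_ENABLE_NVME (2) is a bit inside LPFC_ENABLE_BOTH (3), a runtime check can be written as a simple mask; a sketch (my_fc4_allows_nvme() is a hypothetical helper):

static bool my_fc4_allows_nvme(struct lpfc_hba *phba)
{
	/* IS_ENABLED() folds to a compile-time 0/1 for both =y and =m,
	 * so NVME is reachable only when the build and the admin allow it.
	 */
	return IS_ENABLED(CONFIG_NVME_FC) &&
	       (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME);
}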
@@ -1186,9 +1211,6 @@ struct lpfc_hba {
uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP 1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
@@ -1265,7 +1287,6 @@ struct lpfc_hba {
#define VPD_PORT 0x8 /* valid vpd port data */
#define VPD_MASK 0xf /* mask for any vpd data */
- uint8_t soft_wwn_enable;
struct timer_list fcp_poll_timer;
struct timer_list eratt_poll;
@@ -1331,7 +1352,6 @@ struct lpfc_hba {
atomic_t fabric_iocb_count;
struct timer_list fabric_block_timer;
unsigned long bit_flags;
-#define FABRIC_COMANDS_BLOCKED 0
atomic_t num_rsrc_err;
atomic_t num_cmd_success;
unsigned long last_rsrc_error_time;
@@ -1413,15 +1433,6 @@ struct lpfc_hba {
*/
#define QUE_BUFTAG_BIT (1<<31)
uint32_t buffer_tag_count;
- int wait_4_mlo_maint_flg;
- wait_queue_head_t wait_4_mlo_m_q;
- /* data structure used for latency data collection */
-#define LPFC_NO_BUCKET 0
-#define LPFC_LINEAR_BUCKET 1
-#define LPFC_POWER2_BUCKET 2
- uint8_t bucket_type;
- uint32_t bucket_base;
- uint32_t bucket_step;
/* Maximum number of events that can be outstanding at any time*/
#define LPFC_MAX_EVT_COUNT 512
@@ -1449,8 +1460,6 @@ struct lpfc_hba {
/* RAS Support */
struct lpfc_ras_fwlog ras_fwlog;
- uint8_t menlo_flag; /* menlo generic flags */
-#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
@@ -1545,16 +1554,13 @@ struct lpfc_hba {
/* cgn_reg_signal and cgn_init_reg_signal use
* enum fc_edc_cg_signal_cap_types
*/
- u16 cgn_fpin_frequency;
+ u16 cgn_fpin_frequency; /* In units of msecs */
#define LPFC_FPIN_INIT_FREQ 0xffff
u32 cgn_sig_freq;
u32 cgn_acqe_cnt;
/* RX monitor handling for CMF */
- struct rxtable_entry *rxtable; /* RX_monitor information */
- atomic_t rxtable_idx_head;
-#define LPFC_RXMONITOR_TABLE_IN_USE (LPFC_MAX_RXMONITOR_ENTRY + 73)
- atomic_t rxtable_idx_tail;
+ struct lpfc_rx_info_monitor *rx_monitor;
atomic_t rx_max_read_cnt; /* Maximum read bytes */
uint64_t rx_block_cnt;
@@ -1591,10 +1597,11 @@ struct lpfc_hba {
char os_host_name[MAXHOSTNAMELEN];
- /* SCSI host template information - for physical port */
- struct scsi_host_template port_template;
- /* SCSI host template information - for all vports */
- struct scsi_host_template vport_template;
+ /* LD Signaling */
+ u32 degrade_activate_threshold;
+ u32 degrade_deactivate_threshold;
+ u32 fec_degrade_interval;
+
atomic_t dbg_log_idx;
atomic_t dbg_log_cnt;
atomic_t dbg_log_dmping;
@@ -1603,7 +1610,8 @@ struct lpfc_hba {
#define LPFC_MAX_RXMONITOR_ENTRY 800
#define LPFC_MAX_RXMONITOR_DUMP 32
-struct rxtable_entry {
+struct rx_info_entry {
+ uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
uint64_t total_bytes; /* Total no of read bytes requested */
uint64_t rcv_bytes; /* Total no of read bytes completed */
uint64_t avg_io_size;
@@ -1617,6 +1625,13 @@ struct rxtable_entry {
uint32_t timer_interval;
};
+struct lpfc_rx_info_monitor {
+ struct rx_info_entry *ring; /* info organized in a circular buffer */
+ u32 head_idx, tail_idx; /* index to head/tail of ring */
+ spinlock_t lock; /* spinlock for ring */
+ u32 entries; /* storing number entries/size of ring */
+};
+
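The monitor replaces the old lock-free rxtable head/tail atomics with a conventional spinlocked ring. A hedged sketch of how entries would be pushed (the real helpers live elsewhere in the series; my_rx_monitor_record() is an illustrative name):

static void my_rx_monitor_record(struct lpfc_rx_info_monitor *mon,
				 struct rx_info_entry *entry)
{
	spin_lock(&mon->lock);
	mon->ring[mon->tail_idx] = *entry;
	mon->tail_idx = (mon->tail_idx + 1) % mon->entries;
	if (mon->tail_idx == mon->head_idx)          /* full: drop oldest */
		mon->head_idx = (mon->head_idx + 1) % mon->entries;
	spin_unlock(&mon->lock);
}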
static inline struct Scsi_Host *
lpfc_shost_from_vport(struct lpfc_vport *vport)
{
@@ -1796,3 +1811,75 @@ static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
{
return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
}
+
+static inline
+u8 get_job_ulpstatus(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
+ else
+ return iocbq->iocb.ulpStatus;
+}
+
+static inline
+u32 get_job_word4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wcqe_cmpl.parameter;
+ else
+ return iocbq->iocb.un.ulpWord[4];
+}
+
+static inline
+u8 get_job_cmnd(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_cmnd, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.ulpCommand;
+}
+
+static inline
+u16 get_job_ulpcontext(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_ctxt_tag, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.ulpContext;
+}
+
+static inline
+u16 get_job_rcvoxid(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_rcvoxid, &iocbq->wqe.generic.wqe_com);
+ else
+ return iocbq->iocb.unsli3.rcvsli3.ox_id;
+}
+
+static inline
+u32 get_job_data_placed(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wcqe_cmpl.total_data_placed;
+ else
+ return iocbq->iocb.un.genreq64.bdl.bdeSize;
+}
+
+static inline
+u32 get_job_abtsiotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return iocbq->wqe.abort_cmd.wqe_com.abort_tag;
+ else
+ return iocbq->iocb.un.acxri.abortIoTag;
+}
+
+static inline
+u32 get_job_els_rsp64_did(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return bf_get(wqe_els_did, &iocbq->wqe.els_req.wqe_dest);
+ else
+ return iocbq->iocb.un.elsreq64.remoteID;
+}
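These accessors let completion handlers read status fields without caring whether the hardware reported a SLI3 IOCB or a SLI4 WQE/CQE; the bsg conversion below is the first large consumer. A minimal sketch of the intended call pattern (my_cmpl() is hypothetical):

static void my_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
		    struct lpfc_iocbq *rspiocbq)
{
	u32 ulp_status = get_job_ulpstatus(phba, rspiocbq);
	u32 ulp_word4  = get_job_word4(phba, rspiocbq);

	/* Same test works on SLI3 and SLI4 paths. */
	if (ulp_status == IOSTAT_LOCAL_REJECT &&
	    (ulp_word4 & IOERR_PARAM_MASK) == IOERR_SEQUENCE_TIMEOUT)
		pr_debug("sequence timeout, retrying\n");
}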
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index dd4c51b6ef4e..ef1481326fd7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -922,25 +922,6 @@ lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the Menlo Maintenance sli flag.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- (phba->sli.sli_flag & LPFC_MENLO_MAINT));
-}
-
-/**
* lpfc_vportnum_show - Return the port number in ascii of the hba
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -1109,10 +1090,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
"Unknown\n");
break;
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Menlo Maint Mode\n");
- else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
if (vport->fc_flag & FC_PUBLIC_LOOP)
len += scnprintf(buf + len, PAGE_SIZE-len,
" Public Loop\n");
@@ -1120,12 +1098,22 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
len += scnprintf(buf + len, PAGE_SIZE-len,
" Private Loop\n");
} else {
- if (vport->fc_flag & FC_FABRIC)
- len += scnprintf(buf + len, PAGE_SIZE-len,
- " Fabric\n");
- else
+ if (vport->fc_flag & FC_FABRIC) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag &
+ LPFC_FAWWPN_FABRIC)
+ len += scnprintf(buf + len,
+ PAGE_SIZE - len,
+ " Fabric FA-PWWN\n");
+ else
+ len += scnprintf(buf + len,
+ PAGE_SIZE - len,
+ " Fabric\n");
+ } else {
len += scnprintf(buf + len, PAGE_SIZE-len,
" Point-2-Point\n");
+ }
}
}
@@ -1315,6 +1303,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
+ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) &&
@@ -1709,25 +1700,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
before_fc_flag = phba->pport->fc_flag;
sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
- /* Disable SR-IOV virtual functions if enabled */
- if (phba->cfg_sriov_nr_virtfn) {
- pci_disable_sriov(pdev);
- phba->cfg_sriov_nr_virtfn = 0;
- }
+ if (opcode == LPFC_FW_DUMP) {
+ init_completion(&online_compl);
+ phba->fw_dump_cmpl = &online_compl;
+ } else {
+ /* Disable SR-IOV virtual functions if enabled */
+ if (phba->cfg_sriov_nr_virtfn) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
- if (opcode == LPFC_FW_DUMP)
- phba->hba_flag |= HBA_FW_DUMP_OP;
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ if (status != 0)
+ return status;
- if (status != 0) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return status;
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
}
- /* wait for the device to be quiesced before firmware reset */
- msleep(100);
-
reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET);
@@ -1756,24 +1747,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3153 Fail to perform the requested "
"access: x%x\n", reg_val);
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
return rc;
}
/* keep the original port state */
- if (before_fc_flag & FC_OFFLINE_MODE)
- goto out;
-
- init_completion(&online_compl);
- job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- if (!job_posted)
+ if (before_fc_flag & FC_OFFLINE_MODE) {
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
goto out;
+ }
- wait_for_completion(&online_compl);
+ /* Firmware dump will trigger an HA_ERATT event, and the
+ * lpfc_handle_eratt_s4 routine already handles bringing the port back
+ * online.
+ */
+ if (opcode == LPFC_FW_DUMP) {
+ wait_for_completion(phba->fw_dump_cmpl);
+ } else {
+ init_completion(&online_compl);
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
+ wait_for_completion(&online_compl);
+ }
out:
/* in any case, restore the virtual functions enabled as before */
if (sriov_nr_virtfn) {
+ /* If fw_dump was performed, first disable to clean up */
+ if (opcode == LPFC_FW_DUMP) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+
sriov_err =
lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
if (!sriov_err)
@@ -2796,7 +2805,6 @@ static DEVICE_ATTR(option_rom_version, S_IRUGO,
lpfc_option_rom_version_show, NULL);
static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
lpfc_num_discovered_ports_show, NULL);
-static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
static DEVICE_ATTR_RO(lpfc_drvr_version);
static DEVICE_ATTR_RO(lpfc_enable_fip);
@@ -2817,7 +2825,6 @@ static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
NULL);
static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
-static char *lpfc_soft_wwn_key = "C99G71SL8032A";
#define WWN_SZ 8
/**
* lpfc_wwn_set - Convert string to the 8 byte WWN value.
@@ -2861,229 +2868,7 @@ lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
}
return 0;
}
-/**
- * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: containing the string lpfc_soft_wwn_key.
- * @count: must be size of lpfc_soft_wwn_key.
- *
- * Returns:
- * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
- * length of buf indicates success
- **/
-static ssize_t
-lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- unsigned int cnt = count;
- uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
- u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
- /*
- * We're doing a simple sanity check for soft_wwpn setting.
- * We require that the user write a specific key to enable
- * the soft_wwpn attribute to be settable. Once the attribute
- * is written, the enable key resets. If further updates are
- * desired, the key must be written again to re-enable the
- * attribute.
- *
- * The "key" is not secret - it is a hardcoded string shown
- * here. The intent is to protect against the random user or
- * application that is just writing attributes.
- */
- if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0051 lpfc soft wwpn can not be enabled: "
- "fawwpn is enabled\n");
- return -EINVAL;
- }
-
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if ((cnt != strlen(lpfc_soft_wwn_key)) ||
- (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
- return -EINVAL;
-
- phba->soft_wwn_enable = 1;
-
- dev_printk(KERN_WARNING, &phba->pcidev->dev,
- "lpfc%d: soft_wwpn assignment has been enabled.\n",
- phba->brd_no);
- dev_printk(KERN_WARNING, &phba->pcidev->dev,
- " The soft_wwpn feature is not supported by Broadcom.");
-
- return count;
-}
-static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
-
-/**
- * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: on return contains the wwpn in hexadecimal.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long)phba->cfg_soft_wwpn);
-}
-
-/**
- * lpfc_soft_wwpn_store - Set the ww port name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: contains the wwpn in hexadecimal.
- * @count: number of wwpn bytes in buf
- *
- * Returns:
- * -EACCES hba reset not enabled, adapter over temp
- * -EINVAL soft wwn not enabled, count is invalid, invalid wwpn byte invalid
- * -EIO error taking adapter offline or online
- * value of count on success
- **/
-static ssize_t
-lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- struct completion online_compl;
- int stat1 = 0, stat2 = 0;
- unsigned int cnt = count;
- u8 wwpn[WWN_SZ];
- int rc;
-
- if (!phba->cfg_enable_hba_reset)
- return -EACCES;
- spin_lock_irq(&phba->hbalock);
- if (phba->over_temp_state == HBA_OVER_TEMP) {
- spin_unlock_irq(&phba->hbalock);
- return -EACCES;
- }
- spin_unlock_irq(&phba->hbalock);
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if (!phba->soft_wwn_enable)
- return -EINVAL;
-
- /* lock setting wwpn, wwnn down */
- phba->soft_wwn_enable = 0;
-
- rc = lpfc_wwn_set(buf, cnt, wwpn);
- if (rc) {
- /* not able to set wwpn, unlock it */
- phba->soft_wwn_enable = 1;
- return rc;
- }
-
- phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
- fc_host_port_name(shost) = phba->cfg_soft_wwpn;
- if (phba->cfg_soft_wwnn)
- fc_host_node_name(shost) = phba->cfg_soft_wwnn;
-
- dev_printk(KERN_NOTICE, &phba->pcidev->dev,
- "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
-
- stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- if (stat1)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0463 lpfc_soft_wwpn attribute set failed to "
- "reinit adapter - %d\n", stat1);
- init_completion(&online_compl);
- rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
- LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
-
- wait_for_completion(&online_compl);
- if (stat2)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0464 lpfc_soft_wwpn attribute set failed to "
- "reinit adapter - %d\n", stat2);
- return (stat1 || stat2) ? -EIO : count;
-}
-static DEVICE_ATTR_RW(lpfc_soft_wwpn);
-
-/**
- * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: on return contains the wwnn in hexadecimal.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
- (unsigned long long)phba->cfg_soft_wwnn);
-}
-
-/**
- * lpfc_soft_wwnn_store - sets the ww node name of the adapter
- * @dev: class device that is converted into a Scsi_host.
- * @attr: device attribute, not used.
- * @buf: contains the ww node name in hexadecimal.
- * @count: number of wwnn bytes in buf.
- *
- * Returns:
- * -EINVAL soft wwn not enabled, count is invalid, invalid wwnn byte invalid
- * value of count on success
- **/
-static ssize_t
-lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
- unsigned int cnt = count;
- u8 wwnn[WWN_SZ];
- int rc;
-
- /* count may include a LF at end of string */
- if (buf[cnt-1] == '\n')
- cnt--;
-
- if (!phba->soft_wwn_enable)
- return -EINVAL;
-
- rc = lpfc_wwn_set(buf, cnt, wwnn);
- if (rc) {
- /* Allow wwnn to be set many times, as long as the enable
- * is set. However, once the wwpn is set, everything locks.
- */
- return rc;
- }
-
- phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
-
- dev_printk(KERN_NOTICE, &phba->pcidev->dev,
- "lpfc%d: soft_wwnn set. Value will take effect upon "
- "setting of the soft_wwpn\n", phba->brd_no);
-
- return count;
-}
-static DEVICE_ATTR_RW(lpfc_soft_wwnn);
/**
* lpfc_oas_tgt_show - Return wwpn of target whose luns maybe enabled for
@@ -3960,8 +3745,8 @@ LPFC_ATTR_R(nvmet_mrq_post,
* 3 - register both FCP and NVME
* Supported values are [1,3]. Default value is 3
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
- LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+ LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
"Enable FC4 Protocol support - FCP / NVME");
/*
@@ -4308,333 +4093,6 @@ lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
*/
static DEVICE_ATTR_RO(lpfc_static_vport);
-/**
- * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @attr: Unused.
- * @buf: Data buffer.
- * @count: Size of the data buffer.
- *
- * This function get called when a user write to the lpfc_stat_data_ctrl
- * sysfs file. This function parse the command written to the sysfs file
- * and take appropriate action. These commands are used for controlling
- * driver statistical data collection.
- * Following are the command this function handles.
- *
- * setbucket <bucket_type> <base> <step>
- * = Set the latency buckets.
- * destroybucket = destroy all the buckets.
- * start = start data collection
- * stop = stop data collection
- * reset = reset the collected data
- **/
-static ssize_t
-lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-#define LPFC_MAX_DATA_CTRL_LEN 1024
- static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
- unsigned long i;
- char *str_ptr, *token;
- struct lpfc_vport **vports;
- struct Scsi_Host *v_shost;
- char *bucket_type_str, *base_str, *step_str;
- unsigned long base, step, bucket_type;
-
- if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
- if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
- return -EINVAL;
-
- strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
- str_ptr = &bucket_data[0];
- /* Ignore this token - this is command token */
- token = strsep(&str_ptr, "\t ");
- if (!token)
- return -EINVAL;
-
- bucket_type_str = strsep(&str_ptr, "\t ");
- if (!bucket_type_str)
- return -EINVAL;
-
- if (!strncmp(bucket_type_str, "linear", strlen("linear")))
- bucket_type = LPFC_LINEAR_BUCKET;
- else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
- bucket_type = LPFC_POWER2_BUCKET;
- else
- return -EINVAL;
-
- base_str = strsep(&str_ptr, "\t ");
- if (!base_str)
- return -EINVAL;
- base = simple_strtoul(base_str, NULL, 0);
-
- step_str = strsep(&str_ptr, "\t ");
- if (!step_str)
- return -EINVAL;
- step = simple_strtoul(step_str, NULL, 0);
- if (!step)
- return -EINVAL;
-
- /* Block the data collection for every vport */
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(v_shost->host_lock);
- /* Block and reset data collection */
- vports[i]->stat_data_blocked = 1;
- if (vports[i]->stat_data_enabled)
- lpfc_vport_reset_stat_data(vports[i]);
- spin_unlock_irq(v_shost->host_lock);
- }
-
- /* Set the bucket attributes */
- phba->bucket_type = bucket_type;
- phba->bucket_base = base;
- phba->bucket_step = step;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
-
- /* Unblock data collection */
- spin_lock_irq(v_shost->host_lock);
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(v_shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
- vports = lpfc_create_vport_work_array(phba);
- if (vports == NULL)
- return -ENOMEM;
-
- for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
- v_shost = lpfc_shost_from_vport(vports[i]);
- spin_lock_irq(shost->host_lock);
- vports[i]->stat_data_blocked = 1;
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- vports[i]->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- }
- lpfc_destroy_vport_work_array(phba, vports);
- phba->bucket_type = LPFC_NO_BUCKET;
- phba->bucket_base = 0;
- phba->bucket_step = 0;
- return strlen(buf);
- }
-
- if (!strncmp(buf, "start", strlen("start"))) {
- /* If no buckets configured return error */
- if (phba->bucket_type == LPFC_NO_BUCKET)
- return -EINVAL;
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_alloc_bucket(vport);
- vport->stat_data_enabled = 1;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "stop", strlen("stop"))) {
- spin_lock_irq(shost->host_lock);
- if (vport->stat_data_enabled == 0) {
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- lpfc_free_bucket(vport);
- vport->stat_data_enabled = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
-
- if (!strncmp(buf, "reset", strlen("reset"))) {
- if ((phba->bucket_type == LPFC_NO_BUCKET)
- || !vport->stat_data_enabled)
- return strlen(buf);
- spin_lock_irq(shost->host_lock);
- vport->stat_data_blocked = 1;
- lpfc_vport_reset_stat_data(vport);
- vport->stat_data_blocked = 0;
- spin_unlock_irq(shost->host_lock);
- return strlen(buf);
- }
- return -EINVAL;
-}
-
-
-/**
- * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
- * @dev: Pointer to class device.
- * @attr: Unused.
- * @buf: Data buffer.
- *
- * This function is the read call back function for
- * lpfc_stat_data_ctrl sysfs file. This function report the
- * current statistical data collection state.
- **/
-static ssize_t
-lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int index = 0;
- int i;
- char *bucket_type;
- unsigned long bucket_value;
-
- switch (phba->bucket_type) {
- case LPFC_LINEAR_BUCKET:
- bucket_type = "linear";
- break;
- case LPFC_POWER2_BUCKET:
- bucket_type = "power2";
- break;
- default:
- bucket_type = "No Bucket";
- break;
- }
-
- sprintf(&buf[index], "Statistical Data enabled :%d, "
- "blocked :%d, Bucket type :%s, Bucket base :%d,"
- " Bucket step :%d\nLatency Ranges :",
- vport->stat_data_enabled, vport->stat_data_blocked,
- bucket_type, phba->bucket_base, phba->bucket_step);
- index = strlen(buf);
- if (phba->bucket_type != LPFC_NO_BUCKET) {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- if (phba->bucket_type == LPFC_LINEAR_BUCKET)
- bucket_value = phba->bucket_base +
- phba->bucket_step * i;
- else
- bucket_value = phba->bucket_base +
- (1 << i) * phba->bucket_step;
-
- if (index + 10 > PAGE_SIZE)
- break;
- sprintf(&buf[index], "%08ld ", bucket_value);
- index = strlen(buf);
- }
- }
- sprintf(&buf[index], "\n");
- return strlen(buf);
-}
-
-/*
- * Sysfs attribute to control the statistical data collection.
- */
-static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
-
-/*
- * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
- */
-
-/*
- * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
- * for each target.
- */
-#define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
-#define MAX_STAT_DATA_SIZE_PER_TARGET \
- STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
-
-
-/**
- * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
- * @filp: sysfs file
- * @kobj: Pointer to the kernel object
- * @bin_attr: Attribute object
- * @buf: Buffer pointer
- * @off: File offset
- * @count: Buffer size
- *
- * This function is the read call back function for lpfc_drvr_stat_data
- * sysfs file. This function export the statistical data to user
- * applications.
- **/
-static ssize_t
-sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct device *dev = container_of(kobj, struct device,
- kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- int i = 0, index = 0;
- unsigned long nport_index;
- struct lpfc_nodelist *ndlp = NULL;
- nport_index = (unsigned long)off /
- MAX_STAT_DATA_SIZE_PER_TARGET;
-
- if (!vport->stat_data_enabled || vport->stat_data_blocked
- || (phba->bucket_type == LPFC_NO_BUCKET))
- return 0;
-
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
- if (!ndlp->lat_data)
- continue;
-
- if (nport_index > 0) {
- nport_index--;
- continue;
- }
-
- if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
- > count)
- break;
-
- if (!ndlp->lat_data)
- continue;
-
- /* Print the WWN */
- sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
- ndlp->nlp_portname.u.wwn[0],
- ndlp->nlp_portname.u.wwn[1],
- ndlp->nlp_portname.u.wwn[2],
- ndlp->nlp_portname.u.wwn[3],
- ndlp->nlp_portname.u.wwn[4],
- ndlp->nlp_portname.u.wwn[5],
- ndlp->nlp_portname.u.wwn[6],
- ndlp->nlp_portname.u.wwn[7]);
-
- index = strlen(buf);
-
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
- sprintf(&buf[index], "%010u,",
- ndlp->lat_data[i].cmd_count);
- index = strlen(buf);
- }
- sprintf(&buf[index], "\n");
- index = strlen(buf);
- }
- spin_unlock_irq(shost->host_lock);
- return index;
-}
-
-static struct bin_attribute sysfs_drvr_stat_data_attr = {
- .attr = {
- .name = "lpfc_drvr_stat_data",
- .mode = S_IRUSR,
- },
- .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
- .read = sysfs_drvr_stat_data_read,
- .write = NULL,
-};
-
/*
# lpfc_link_speed: Link speed selection for initializing the Fibre Channel
# connection.
@@ -6412,7 +5870,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_option_rom_version.attr,
&dev_attr_link_state.attr,
&dev_attr_num_discovered_ports.attr,
- &dev_attr_menlo_mgmt_mode.attr,
&dev_attr_lpfc_drvr_version.attr,
&dev_attr_lpfc_enable_fip.attr,
&dev_attr_lpfc_temp_sensor.attr,
@@ -6477,9 +5934,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_nvme_enable_fb.attr,
&dev_attr_lpfc_nvmet_fb_size.attr,
&dev_attr_lpfc_enable_bg.attr,
- &dev_attr_lpfc_soft_wwnn.attr,
- &dev_attr_lpfc_soft_wwpn.attr,
- &dev_attr_lpfc_soft_wwn_enable.attr,
&dev_attr_lpfc_enable_hba_reset.attr,
&dev_attr_lpfc_enable_hba_heartbeat.attr,
&dev_attr_lpfc_EnableXLane.attr,
@@ -6492,7 +5946,6 @@ static struct attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_xlane_priority.attr,
&dev_attr_lpfc_sg_seg_cnt.attr,
&dev_attr_lpfc_max_scsicmpl_time.attr,
- &dev_attr_lpfc_stat_data_ctrl.attr,
&dev_attr_lpfc_aer_support.attr,
&dev_attr_lpfc_aer_state_cleanup.attr,
&dev_attr_lpfc_sriov_nr_virtfn.attr,
@@ -6551,7 +6004,6 @@ static struct attribute *lpfc_vport_attrs[] = {
&dev_attr_npiv_info.attr,
&dev_attr_lpfc_enable_da_id.attr,
&dev_attr_lpfc_max_scsicmpl_time.attr,
- &dev_attr_lpfc_stat_data_ctrl.attr,
&dev_attr_lpfc_static_vport.attr,
&dev_attr_cmf_info.attr,
NULL,
@@ -6764,17 +6216,14 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
int error;
- error = sysfs_create_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
-
/* Virtual ports do not need ctrl_reg and mbox */
- if (error || vport->port_type == LPFC_NPIV_PORT)
- goto out;
+ if (vport->port_type == LPFC_NPIV_PORT)
+ return 0;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_ctlreg_attr);
if (error)
- goto out_remove_stat_attr;
+ goto out;
error = sysfs_create_bin_file(&shost->shost_dev.kobj,
&sysfs_mbox_attr);
@@ -6784,9 +6233,6 @@ lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
return 0;
out_remove_ctlreg_attr:
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
-out_remove_stat_attr:
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
out:
return error;
}
@@ -6799,8 +6245,7 @@ void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- sysfs_remove_bin_file(&shost->shost_dev.kobj,
- &sysfs_drvr_stat_data_attr);
+
/* Virtual ports do not need ctrl_reg and mbox */
if (vport->port_type == LPFC_NPIV_PORT)
return;
@@ -7083,17 +6528,34 @@ lpfc_get_stats(struct Scsi_Host *shost)
memset(hs, 0, sizeof (struct fc_host_statistics));
hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
+ hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
+
/*
- * The MBX_READ_STATUS returns tx_k_bytes which has to
- * converted to words
+ * The MBX_READ_STATUS returns tx_k_bytes which has to be
+ * converted to words.
+ *
+ * Check if the extended byte flag is set, to know when to collect the
+ * upper bits of the 64-bit wide statistics counters.
*/
- hs->tx_words = (uint64_t)
- ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
- * (uint64_t)256);
- hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
- hs->rx_words = (uint64_t)
- ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
- * (uint64_t)256);
+ if (pmb->un.varRdStatus.xkb & RD_ST_XKB) {
+ hs->tx_words = (u64)
+ ((((u64)(pmb->un.varRdStatus.xmit_xkb &
+ RD_ST_XMIT_XKB_MASK) << 32) |
+ (u64)pmb->un.varRdStatus.xmitByteCnt) *
+ (u64)256);
+ hs->rx_words = (u64)
+ ((((u64)(pmb->un.varRdStatus.rcv_xkb &
+ RD_ST_RCV_XKB_MASK) << 32) |
+ (u64)pmb->un.varRdStatus.rcvByteCnt) *
+ (u64)256);
+ } else {
+ hs->tx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
+ * (uint64_t)256);
+ hs->rx_words = (uint64_t)
+ ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
+ * (uint64_t)256);
+ }
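In both branches the x256 factor is kilobytes to 4-byte words: x1024 bytes per KB, then /4 bytes per word. A worked example with hypothetical counter values:

/* Hypothetical: upper bits (xkb) = 0x1, lower counter = 0x00000400 KB. */
u64 kbytes = ((u64)0x1 << 32) | 0x00000400;  /* 4,294,968,320 KB          */
u64 words  = kbytes * 256;                   /* * 1024 B/KB, / 4 B/word   */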
memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
pmb->mbxCommand = MBX_READ_LNK_STAT;
@@ -7574,7 +7036,6 @@ lpfc_get_hba_function_mode(struct lpfc_hba *phba)
case PCI_DEVICE_ID_LANCER_FCOE:
case PCI_DEVICE_ID_LANCER_FCOE_VF:
case PCI_DEVICE_ID_ZEPHYR_DCSP:
- case PCI_DEVICE_ID_HORNET:
case PCI_DEVICE_ID_TIGERSHARK:
case PCI_DEVICE_ID_TOMCAT:
phba->hba_flag |= HBA_FCOE_MODE;
@@ -7709,8 +7170,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->sli_rev == LPFC_SLI_REV4)
phba->cfg_irq_chann = phba->cfg_hdw_queue;
- phba->cfg_soft_wwnn = 0L;
- phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_aer_support_init(phba, lpfc_aer_support);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index fdf08cb57207..852b025e2fec 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -88,17 +88,9 @@ struct lpfc_bsg_mbox {
uint32_t outExtWLen; /* from app */
};
-#define MENLO_DID 0x0000FC0E
-
-struct lpfc_bsg_menlo {
- struct lpfc_iocbq *cmdiocbq;
- struct lpfc_dmabuf *rmp;
-};
-
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
-#define TYPE_MENLO 4
struct bsg_job_data {
uint32_t type;
struct bsg_job *set_job; /* job waiting for this iocb to finish */
@@ -106,7 +98,6 @@ struct bsg_job_data {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
- struct lpfc_bsg_menlo menlo;
} context_un;
};
@@ -303,15 +294,14 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp, *rmp;
struct lpfc_nodelist *ndlp;
struct lpfc_bsg_iocb *iocb;
unsigned long flags;
- unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -325,22 +315,24 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
iocb = &dd_data->context_un.iocb;
- ndlp = iocb->cmdiocbq->context_un.ndlp;
+ ndlp = iocb->cmdiocbq->ndlp;
rmp = iocb->rmp;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ cmp = cmdiocbq->cmd_dmabuf;
+ bmp = cmdiocbq->bpl_dmabuf;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
/* Copy the completed data or set the error status */
if (job) {
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -355,10 +347,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
rc = -EACCES;
}
} else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
bsg_reply->reply_payload_rcv_len =
lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
+ total_data_placed, 0);
}
}
@@ -388,22 +379,21 @@ static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct lpfc_hba *phba = vport->phba;
struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = rdata->pnode;
struct fc_bsg_reply *bsg_reply = job->reply;
struct ulp_bde64 *bpl = NULL;
- uint32_t timeout;
struct lpfc_iocbq *cmdiocbq = NULL;
- IOCB_t *cmd;
struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
+ int request_nseg, reply_nseg;
+ u32 num_entry;
struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val;
int rc = 0;
int iocb_stat;
+ u16 ulp_context;
/* in case no data is transferred */
bsg_reply->reply_payload_rcv_len = 0;
@@ -426,8 +416,6 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
goto free_dd;
}
- cmd = &cmdiocbq->iocb;
-
bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!bmp) {
rc = -ENOMEM;
@@ -461,34 +449,24 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
goto free_cmp;
}
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = ndlp->nlp_rpi;
+ num_entry = request_nseg + reply_nseg;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- cmd->ulpOwner = OWN_CHIP;
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
+
+ lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
+ phba->fc_ratov * 2);
+
+ cmdiocbq->num_bdes = num_entry;
cmdiocbq->vport = phba->pport;
- cmdiocbq->context3 = bmp;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- timeout = phba->fc_ratov * 2;
- cmd->ulpTimeout = timeout;
+ cmdiocbq->cmd_dmabuf = cmp;
+ cmdiocbq->bpl_dmabuf = bmp;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = cmp;
- cmdiocbq->context3 = bmp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
+ cmdiocbq->context_un.dd_data = dd_data;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
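The deleted un.genreq64 field writes collapse into a single prep call plus the named iocbq members. A sketch of the resulting issue path, assuming the lpfc_sli_prep_gen_req() prototype this patch adds to lpfc_crtn.h; the wrapper and its minimal error handling are illustrative only:

static int
example_issue_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_nodelist *ndlp, struct lpfc_dmabuf *cmp,
		      struct lpfc_dmabuf *bmp, u32 num_entry)
{
	u16 ulp_context;

	/* SLI-4 addresses the node by its adapter-assigned RPI */
	if (phba->sli_rev == LPFC_SLI_REV4)
		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	else
		ulp_context = ndlp->nlp_rpi;

	/* One call builds either the WQE or the IOCB form */
	lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
			      phba->fc_ratov * 2);

	cmdiocbq->num_bdes = num_entry;
	cmdiocbq->cmd_dmabuf = cmp;
	cmdiocbq->bpl_dmabuf = bmp;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	return lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
}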
@@ -506,8 +484,8 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
- if (!cmdiocbq->context_un.ndlp) {
+ cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->ndlp) {
rc = -ENODEV;
goto free_rmp;
}
@@ -516,9 +494,9 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
if (iocb_stat == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed yet */
- if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
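The hbalock-protected check above is the issuing half of the abort-window handshake: the window opens only while the command still carries LPFC_IO_LIBDFC, and the completion handler clears LPFC_IO_CMD_OUTSTANDING again before touching the job. A sketch of the open side, with a hypothetical helper name:

static void
example_open_abort_window(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* still owned by libdfc, i.e. not completed/released yet */
	if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)
		cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}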
@@ -575,7 +553,6 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
struct fc_bsg_ctels_reply *els_reply;
@@ -583,10 +560,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
unsigned long flags;
unsigned int rsp_size;
int rc = 0;
+ u32 ulp_status, ulp_word4, total_data_placed;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
ndlp = dd_data->context_un.iocb.ndlp;
- cmdiocbq->context1 = ndlp;
+ cmdiocbq->ndlp = ndlp;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -600,11 +578,13 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
- rsp = &rspiocbq->iocb;
- pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
+ total_data_placed = get_job_data_placed(phba, rspiocbq);
+ pcmd = cmdiocbq->cmd_dmabuf;
prsp = (struct lpfc_dmabuf *)pcmd->list.next;
/* Copy the completed job data or determine the job status if job is
@@ -612,24 +592,28 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
*/
if (job) {
- if (rsp->ulpStatus == IOSTAT_SUCCESS) {
- rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+ if (ulp_status == IOSTAT_SUCCESS) {
+ rsp_size = total_data_placed;
bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
prsp->virt,
rsp_size);
- } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+ } else if (ulp_status == IOSTAT_LS_RJT) {
bsg_reply->reply_payload_rcv_len =
sizeof(struct fc_bsg_ctels_reply);
/* LS_RJT data returned in word 4 */
- rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+ rjt_data = (uint8_t *)&ulp_word4;
els_reply = &bsg_reply->reply_data.ctels_reply;
els_reply->status = FC_CTELS_STATUS_REJECT;
els_reply->rjt_data.action = rjt_data[3];
els_reply->rjt_data.reason_code = rjt_data[2];
els_reply->rjt_data.reason_explanation = rjt_data[1];
els_reply->rjt_data.vendor_unique = rjt_data[0];
+ } else if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SEQUENCE_TIMEOUT) {
+ rc = -ETIMEDOUT;
} else {
rc = -EIO;
}
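The LS_RJT branch reinterprets the raw word-4 value as four one-byte reject fields. A sketch of that unpacking with the same indexing as above; the helper name is hypothetical:

static void
example_decode_ls_rjt(u32 ulp_word4, struct fc_bsg_ctels_reply *els_reply)
{
	u8 *rjt_data = (u8 *)&ulp_word4;

	els_reply->status = FC_CTELS_STATUS_REJECT;
	els_reply->rjt_data.action = rjt_data[3];
	els_reply->rjt_data.reason_code = rjt_data[2];
	els_reply->rjt_data.reason_explanation = rjt_data[1];
	els_reply->rjt_data.vendor_unique = rjt_data[0];
}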
@@ -706,7 +690,6 @@ lpfc_bsg_rport_els(struct bsg_job *job)
* we won't DMA into memory that is no longer allocated for the
* request.
*/
-
cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
ndlp->nlp_DID, elscmd);
if (!cmdiocbq) {
@@ -717,19 +700,20 @@ lpfc_bsg_rport_els(struct bsg_job *job)
/* Transfer the request payload to allocated command dma buffer */
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
- ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+ cmdiocbq->cmd_dmabuf->virt,
cmdsize);
rpi = ndlp->nlp_rpi;
if (phba->sli_rev == LPFC_SLI_REV4)
- cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
+ bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
+ phba->sli4_hba.rpi_ids[rpi]);
else
cmdiocbq->iocb.ulpContext = rpi;
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context_un.ndlp = ndlp;
- cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->context_un.dd_data = dd_data;
+ cmdiocbq->ndlp = ndlp;
+ cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -747,19 +731,13 @@ lpfc_bsg_rport_els(struct bsg_job *job)
readl(phba->HCregaddr); /* flush */
}
- cmdiocbq->context1 = lpfc_nlp_get(ndlp);
- if (!cmdiocbq->context1) {
- rc = -EIO;
- goto linkdown_err;
- }
-
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
- if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -916,20 +894,21 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_bsg_event *evt;
struct event_data *evt_dat = NULL;
struct lpfc_iocbq *iocbq;
+ IOCB_t *iocb = NULL;
size_t offset = 0;
struct list_head head;
struct ulp_bde64 *bde;
dma_addr_t dma_addr;
int i;
- struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
- struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
- struct lpfc_hbq_entry *hbqe;
+ struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
struct lpfc_sli_ct_request *ct_req;
struct bsg_job *job = NULL;
struct fc_bsg_reply *bsg_reply;
struct bsg_job_data *dd_data = NULL;
unsigned long flags;
int size = 0;
+ u32 bde_count = 0;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &piocbq->list);
@@ -959,12 +938,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
/* take accumulated byte count from the last iocbq */
iocbq = list_entry(head.prev, typeof(*iocbq), list);
- evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
+ else
+ evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
} else {
list_for_each_entry(iocbq, &head, list) {
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+ iocb = &iocbq->iocb;
+ for (i = 0; i < iocb->ulpBdeCount; i++)
evt_dat->len +=
- iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+ iocb->un.cont64[i].tus.f.bdeSize;
}
}
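Received-length accounting now depends on the SLI revision, as the split above shows. The same decision tree as one sketch, assuming the structures referenced in the hunk; the function name is hypothetical:

static u32
example_unsol_ct_len(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	u32 len = 0;
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		/* SLI-4 reports the accumulated count in the WCQE */
		if (phba->sli_rev == LPFC_SLI_REV4)
			return iocbq->wcqe_cmpl.total_data_placed;
		return iocbq->iocb.unsli3.rcvsli3.acc_len;
	}

	/* non-HBQ SLI-3: sum the per-IOCB BDE sizes */
	for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
		len += iocbq->iocb.un.cont64[i].tus.f.bdeSize;
	return len;
}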
@@ -984,22 +968,21 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
list_for_each_entry(iocbq, &head, list) {
size = 0;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- bdeBuf1 = iocbq->context2;
- bdeBuf2 = iocbq->context3;
+ bdeBuf1 = iocbq->cmd_dmabuf;
+ bdeBuf2 = iocbq->bpl_dmabuf;
}
- for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocbq->wcqe_cmpl.word3;
+ else
+ bde_count = iocbq->iocb.ulpBdeCount;
+ for (i = 0; i < bde_count; i++) {
if (phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED) {
if (i == 0) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.un.ulpWord[0];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
dmabuf = bdeBuf1;
} else if (i == 1) {
- hbqe = (struct lpfc_hbq_entry *)
- &iocbq->iocb.unsli3.
- sli3Words[4];
- size = hbqe->bde.tus.f.bdeSize;
+ size = iocbq->unsol_rcv_len;
dmabuf = bdeBuf2;
}
if ((offset + size) > evt_dat->len)
@@ -1053,17 +1036,17 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_in_buf_free(phba,
dmabuf);
} else {
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
}
break;
default:
if (!(phba->sli3_options &
LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba,
- pring,
- 1);
+ lpfc_sli3_post_buffer(phba,
+ pring,
+ 1);
break;
}
}
@@ -1086,14 +1069,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->ct_ctx[
evt_dat->immed_dat].SID);
phba->ct_ctx[evt_dat->immed_dat].rxid =
- piocbq->iocb.ulpContext;
+ get_job_ulpcontext(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].oxid =
- piocbq->iocb.unsli3.rcvsli3.ox_id;
+ get_job_rcvoxid(phba, piocbq);
phba->ct_ctx[evt_dat->immed_dat].SID =
- piocbq->iocb.un.rcvels.remoteID;
+ bf_get(wqe_els_did,
+ &piocbq->wqe.xmit_els_rsp.wqe_dest);
phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
- evt_dat->immed_dat = piocbq->iocb.ulpContext;
+ evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);
evt_dat->type = FC_REG_CT_EVENT;
list_add(&evt_dat->node, &evt->events_to_see);
@@ -1376,13 +1360,13 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
struct bsg_job_data *dd_data;
struct bsg_job *job;
struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
struct lpfc_dmabuf *bmp, *cmp;
struct lpfc_nodelist *ndlp;
unsigned long flags;
int rc = 0;
+ u32 ulp_status, ulp_word4;
- dd_data = cmdiocbq->context1;
+ dd_data = cmdiocbq->context_un.dd_data;
/* Determine if job has been aborted */
spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1395,21 +1379,23 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
/* Close the timeout handler abort window */
spin_lock_irqsave(&phba->hbalock, flags);
- cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+ cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
spin_unlock_irqrestore(&phba->hbalock, flags);
ndlp = dd_data->context_un.iocb.ndlp;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- rsp = &rspiocbq->iocb;
+ cmp = cmdiocbq->cmd_dmabuf;
+ bmp = cmdiocbq->bpl_dmabuf;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocbq);
+ ulp_word4 = get_job_word4(phba, rspiocbq);
/* Copy the completed job data or set the error status */
if (job) {
bsg_reply = job->reply;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+ if (ulp_status) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch (ulp_word4 & IOERR_PARAM_MASK) {
case IOERR_SEQUENCE_TIMEOUT:
rc = -ETIMEDOUT;
break;
@@ -1459,13 +1445,13 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
int num_entry)
{
- IOCB_t *icmd;
struct lpfc_iocbq *ctiocb = NULL;
int rc = 0;
struct lpfc_nodelist *ndlp = NULL;
struct bsg_job_data *dd_data;
unsigned long flags;
uint32_t creg_val;
+ u16 ulp_context, iotag;
ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
if (!ndlp) {
@@ -1492,70 +1478,44 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
goto no_ctiocb;
}
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
- icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- /* Fill in rest of iocb */
- icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
- icmd->ulpContext = phba->ct_ctx[tag].rxid;
- icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
- ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
- if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "2721 ndlp null for oxid %x SID %x\n",
- icmd->ulpContext,
- phba->ct_ctx[tag].SID);
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
- /* get a refernece count so the ndlp doesn't go away while
- * we respond
- */
- if (!lpfc_nlp_get(ndlp)) {
- rc = IOCB_ERROR;
- goto issue_ct_rsp_exit;
- }
-
- icmd->un.ulpWord[3] =
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ phba->ct_ctx[tag].oxid, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
/* The exchange is done, mark the entry as invalid */
phba->ct_ctx[tag].valid = UNSOL_INVALID;
- } else
- icmd->ulpContext = (ushort) tag;
+ iotag = get_wqe_reqtag(ctiocb);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ ctiocb->num_bdes = num_entry;
+ iotag = ctiocb->iocb.ulpIoTag;
+ }
- icmd->ulpTimeout = phba->fc_ratov * 2;
+ ulp_context = get_job_ulpcontext(phba, ctiocb);
/* Xmit CT response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
- icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+ ulp_context, iotag, tag, phba->link_state);
- ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
+ ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
ctiocb->vport = phba->pport;
- ctiocb->context1 = dd_data;
- ctiocb->context2 = cmp;
- ctiocb->context3 = bmp;
- ctiocb->context_un.ndlp = ndlp;
- ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
+ ctiocb->context_un.dd_data = dd_data;
+ ctiocb->cmd_dmabuf = cmp;
+ ctiocb->bpl_dmabuf = bmp;
+ ctiocb->ndlp = ndlp;
+ ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;
dd_data->type = TYPE_IOCB;
dd_data->set_job = job;
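As with the GEN_REQUEST64 conversion, the CT response build reduces to lpfc_sli_prep_xmit_seq64(), parameterized per SLI revision: SLI-4 addresses the exchange by RPI and OX_ID, SLI-3 by the ulpContext tag. A sketch following the two calls above; the wrapper name is hypothetical:

static void
example_prep_ct_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb,
		    struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
		    u16 tag, u16 oxid, u32 num_entry)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
					 oxid, num_entry,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);
	else
		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag,
					 num_entry, FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);
}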
@@ -1582,9 +1542,9 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
if (rc == IOCB_SUCCESS) {
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O had not been completed/released */
- if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+ if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
/* open up abort window to timeout handler */
- ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+ ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return 0; /* done for now */
@@ -2017,8 +1977,6 @@ lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
- int rc;
-
if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
"3136 Port still had vfi registered: "
@@ -2028,8 +1986,7 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
phba->vpi_ids[phba->pport->vpi]);
return -EINVAL;
}
- rc = lpfc_issue_reg_vfi(phba->pport);
- return rc;
+ return lpfc_issue_reg_vfi(phba->pport);
}
/**
@@ -2625,7 +2582,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
*
* This function obtains the transmit and receive ids required to send
* an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
- * flags are used to the unsolicted response handler is able to process
+ * flags are used so that the unsolicited response handler is able to process
* the ct command sent on the same port.
**/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
@@ -2633,7 +2590,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
{
struct lpfc_bsg_event *evt;
struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
struct lpfc_dmabuf *dmabuf;
struct ulp_bde64 *bpl = NULL;
struct lpfc_sli_ct_request *ctreq = NULL;
@@ -2641,6 +2597,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
int time_left;
int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
+ u32 status;
*txxri = 0;
*rxxri = 0;
@@ -2684,9 +2641,6 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
goto err_get_xri_exit;
}
- cmd = &cmdiocbq->iocb;
- rsp = &rspiocbq->iocb;
-
memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
@@ -2696,36 +2650,24 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
ctreq->CommandResponse.bits.Size = 0;
-
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
-
- cmd->un.xseq64.w5.hcsw.Fctl = LA;
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpContext = rpi;
-
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->bpl_dmabuf = dmabuf;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
- cmdiocbq->iocb_cmpl = NULL;
+ cmdiocbq->cmd_cmpl = NULL;
+
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
+ FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
- rspiocbq,
- (phba->fc_ratov * 2)
- + LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
+ rspiocbq, (phba->fc_ratov * 2)
+ + LPFC_DRVR_TIMEOUT);
+
+ status = get_job_ulpstatus(phba, rspiocbq);
+ if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
ret_val = -EIO;
goto err_get_xri_exit;
}
- *txxri = rsp->ulpContext;
+ *txxri = get_job_ulpcontext(phba, rspiocbq);
evt->waiting = 1;
evt->wait_time_stamp = jiffies;
@@ -2926,16 +2868,16 @@ out:
}
/**
- * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
+ * lpfcdiag_sli3_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
* @phba: Pointer to HBA context object
* @rxxri: Receive exchange id
* @len: Number of data bytes
*
* This function allocates and posts a data buffer of sufficient size to receive
- * an unsolicted CT command.
+ * an unsolicited CT command.
**/
-static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
- size_t len)
+static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
+ size_t len)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
@@ -2972,7 +2914,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
/* Queue buffers for the receive exchange */
num_bde = (uint32_t)rxbuffer->flag;
dmp = &rxbuffer->dma;
-
cmd = &cmdiocbq->iocb;
i = 0;
@@ -3040,7 +2981,6 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
ret_val = -EIO;
goto err_post_rxbufs_exit;
}
-
cmd = &cmdiocbq->iocb;
i = 0;
}
@@ -3092,7 +3032,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
- IOCB_t *cmd, *rsp = NULL;
+ union lpfc_wqe128 *cmdwqe, *rspwqe;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
@@ -3185,7 +3125,7 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
goto loopback_test_exit;
}
- rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
if (rc) {
lpfcdiag_loop_self_unreg(phba, rpi);
goto loopback_test_exit;
@@ -3228,9 +3168,12 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
goto err_loopback_test_exit;
}
- cmd = &cmdiocbq->iocb;
- if (phba->sli_rev < LPFC_SLI_REV4)
- rsp = &rspiocbq->iocb;
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(union lpfc_wqe));
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rspwqe = &rspiocbq->wqe;
+ memset(rspwqe, 0, sizeof(union lpfc_wqe));
+ }
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
@@ -3262,41 +3205,32 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
/* Build the XMIT_SEQUENCE iocb */
num_bde = (uint32_t)txbuffer->flag;
- cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
- cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
- cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
-
- cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
- cmd->un.xseq64.w5.hcsw.Dfctl = 0;
- cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
-
- cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- cmd->ulpBdeCount = 1;
- cmd->ulpLe = 1;
- cmd->ulpClass = CLASS3;
+ cmdiocbq->num_bdes = num_bde;
+ cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
+ cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
+ cmdiocbq->vport = phba->pport;
+ cmdiocbq->cmd_cmpl = NULL;
+ cmdiocbq->bpl_dmabuf = txbmp;
if (phba->sli_rev < LPFC_SLI_REV4) {
- cmd->ulpContext = txxri;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
+ num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+
} else {
- cmd->un.xseq64.bdl.ulpIoTag32 = 0;
- cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
- cmdiocbq->context3 = txbmp;
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
+ phba->sli4_hba.rpi_ids[rpi], 0xffff,
+ full_size, FC_RCTL_DD_UNSOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
cmdiocbq->sli4_xritag = NO_XRI;
- cmd->unsli3.rcvsli3.ox_id = 0xffff;
}
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
- cmdiocbq->vport = phba->pport;
- cmdiocbq->iocb_cmpl = NULL;
+
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
-
- if ((iocb_stat != IOCB_SUCCESS) ||
- ((phba->sli_rev < LPFC_SLI_REV4) &&
- (rsp->ulpStatus != IOSTAT_SUCCESS))) {
+ if (iocb_stat != IOCB_SUCCESS ||
+ (phba->sli_rev < LPFC_SLI_REV4 &&
+ (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"3126 Failed loopback test issue iocb: "
"iocb_stat:x%x\n", iocb_stat);
@@ -3429,7 +3363,7 @@ job_error:
* This is completion handler function for mailbox commands issued from
* lpfc_bsg_issue_mbox function. This function is called by the
* mailbox event handler function with no lock held. This function
- * will wake up thread waiting on the wait queue pointed by context1
+ * will wake up the thread waiting on the wait queue pointed to by dd_data
* of the mailbox.
**/
static void
@@ -3556,15 +3490,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
"1226 mbox: set_variable 0x%x, 0x%x\n",
mb->un.varWords[0],
mb->un.varWords[1]);
- if ((mb->un.varWords[0] == SETVAR_MLOMNT)
- && (mb->un.varWords[1] == 1)) {
- phba->wait_4_mlo_maint_flg = 1;
- } else if (mb->un.varWords[0] == SETVAR_MLORST) {
- spin_lock_irq(&phba->hbalock);
- phba->link_flag &= ~LS_LOOPBACK_MODE;
- spin_unlock_irq(&phba->hbalock);
- phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
- }
break;
case MBX_READ_SPARM64:
case MBX_REG_LOGIN:
@@ -5046,283 +4971,6 @@ lpfc_bsg_mbox_cmd(struct bsg_job *job)
return rc;
}
-/**
- * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
- * @phba: Pointer to HBA context object.
- * @cmdiocbq: Pointer to command iocb.
- * @rspiocbq: Pointer to response iocb.
- *
- * This function is the completion handler for iocbs issued using
- * lpfc_menlo_cmd function. This function is called by the
- * ring event handler function without any lock held. This function
- * can be called from both worker thread context and interrupt
- * context. This function also can be called from another thread which
- * cleans up the SLI layer objects.
- * This function copies the contents of the response iocb to the
- * response iocb memory object provided by the caller of
- * lpfc_sli_issue_iocb_wait and then wakes up the thread which
- * sleeps for the iocb completion.
- **/
-static void
-lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdiocbq,
- struct lpfc_iocbq *rspiocbq)
-{
- struct bsg_job_data *dd_data;
- struct bsg_job *job;
- struct fc_bsg_reply *bsg_reply;
- IOCB_t *rsp;
- struct lpfc_dmabuf *bmp, *cmp, *rmp;
- struct lpfc_bsg_menlo *menlo;
- unsigned long flags;
- struct menlo_response *menlo_resp;
- unsigned int rsp_size;
- int rc = 0;
-
- dd_data = cmdiocbq->context1;
- cmp = cmdiocbq->context2;
- bmp = cmdiocbq->context3;
- menlo = &dd_data->context_un.menlo;
- rmp = menlo->rmp;
- rsp = &rspiocbq->iocb;
-
- /* Determine if job has been aborted */
- spin_lock_irqsave(&phba->ct_ev_lock, flags);
- job = dd_data->set_job;
- if (job) {
- bsg_reply = job->reply;
- /* Prevent timeout handling from trying to abort job */
- job->dd_data = NULL;
- }
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- /* Copy the job data or set the failing status for the job */
-
- if (job) {
- /* always return the xri, this would be used in the case
- * of a menlo download to allow the data to be sent as a
- * continuation of the exchange.
- */
-
- menlo_resp = (struct menlo_response *)
- bsg_reply->reply_data.vendor_reply.vendor_rsp;
- menlo_resp->xri = rsp->ulpContext;
- if (rsp->ulpStatus) {
- if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
- case IOERR_SEQUENCE_TIMEOUT:
- rc = -ETIMEDOUT;
- break;
- case IOERR_INVALID_RPI:
- rc = -EFAULT;
- break;
- default:
- rc = -EACCES;
- break;
- }
- } else {
- rc = -EACCES;
- }
- } else {
- rsp_size = rsp->un.genreq64.bdl.bdeSize;
- bsg_reply->reply_payload_rcv_len =
- lpfc_bsg_copy_data(rmp, &job->reply_payload,
- rsp_size, 0);
- }
-
- }
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
- lpfc_free_bsg_buffers(phba, cmp);
- lpfc_free_bsg_buffers(phba, rmp);
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
- kfree(dd_data);
-
- /* Complete the job if active */
-
- if (job) {
- bsg_reply->result = rc;
- bsg_job_done(job, bsg_reply->result,
- bsg_reply->reply_payload_rcv_len);
- }
-
- return;
-}
-
-/**
- * lpfc_menlo_cmd - send an ioctl for menlo hardware
- * @job: fc_bsg_job to handle
- *
- * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
- * all the command completions will return the xri for the command.
- * For menlo data requests a gen request 64 CX is used to continue the exchange
- * supplied in the menlo request header xri field.
- **/
-static int
-lpfc_menlo_cmd(struct bsg_job *job)
-{
- struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
- struct fc_bsg_request *bsg_request = job->request;
- struct fc_bsg_reply *bsg_reply = job->reply;
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_iocbq *cmdiocbq;
- IOCB_t *cmd;
- int rc = 0;
- struct menlo_command *menlo_cmd;
- struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
- int request_nseg;
- int reply_nseg;
- struct bsg_job_data *dd_data;
- struct ulp_bde64 *bpl = NULL;
-
- /* in case no data is returned return just the return code */
- bsg_reply->reply_payload_rcv_len = 0;
-
- if (job->request_len <
- sizeof(struct fc_bsg_request) +
- sizeof(struct menlo_command)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2784 Received MENLO_CMD request below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (job->reply_len < sizeof(*bsg_reply) +
- sizeof(struct menlo_response)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2785 Received MENLO_CMD reply below "
- "minimum size\n");
- rc = -ERANGE;
- goto no_dd_data;
- }
-
- if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2786 Adapter does not support menlo "
- "commands\n");
- rc = -EPERM;
- goto no_dd_data;
- }
-
- menlo_cmd = (struct menlo_command *)
- bsg_request->rqst_data.h_vendor.vendor_cmd;
-
- /* allocate our bsg tracking structure */
- dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
- if (!dd_data) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
- "2787 Failed allocation of dd_data\n");
- rc = -ENOMEM;
- goto no_dd_data;
- }
-
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp) {
- rc = -ENOMEM;
- goto free_dd;
- }
-
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
- if (!bmp->virt) {
- rc = -ENOMEM;
- goto free_bmp;
- }
-
- INIT_LIST_HEAD(&bmp->list);
-
- bpl = (struct ulp_bde64 *)bmp->virt;
- request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
- cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
- 1, bpl, &request_nseg);
- if (!cmp) {
- rc = -ENOMEM;
- goto free_bmp;
- }
- lpfc_bsg_copy_data(cmp, &job->request_payload,
- job->request_payload.payload_len, 1);
-
- bpl += request_nseg;
- reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
- rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
- bpl, &reply_nseg);
- if (!rmp) {
- rc = -ENOMEM;
- goto free_cmp;
- }
-
- cmdiocbq = lpfc_sli_get_iocbq(phba);
- if (!cmdiocbq) {
- rc = -ENOMEM;
- goto free_rmp;
- }
-
- cmd = &cmdiocbq->iocb;
- cmd->un.genreq64.bdl.ulpIoTag32 = 0;
- cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- cmd->un.genreq64.bdl.bdeSize =
- (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
- cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- cmd->un.genreq64.w5.hcsw.Dfctl = 0;
- cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
- cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
- cmd->ulpBdeCount = 1;
- cmd->ulpClass = CLASS3;
- cmd->ulpOwner = OWN_CHIP;
- cmd->ulpLe = 1; /* Limited Edition */
- cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
- cmdiocbq->vport = phba->pport;
- /* We want the firmware to timeout before we do */
- cmd->ulpTimeout = MENLO_TIMEOUT - 5;
- cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
- cmdiocbq->context1 = dd_data;
- cmdiocbq->context2 = cmp;
- cmdiocbq->context3 = bmp;
- if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
- cmd->ulpPU = MENLO_PU; /* 3 */
- cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
- cmd->ulpContext = MENLO_CONTEXT; /* 0 */
- } else {
- cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
- cmd->ulpPU = 1;
- cmd->un.ulpWord[4] = 0;
- cmd->ulpContext = menlo_cmd->xri;
- }
-
- dd_data->type = TYPE_MENLO;
- dd_data->set_job = job;
- dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
- dd_data->context_un.menlo.rmp = rmp;
- job->dd_data = dd_data;
-
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
- MENLO_TIMEOUT - 5);
- if (rc == IOCB_SUCCESS)
- return 0; /* done for now */
-
- lpfc_sli_release_iocbq(phba, cmdiocbq);
-
-free_rmp:
- lpfc_free_bsg_buffers(phba, rmp);
-free_cmp:
- lpfc_free_bsg_buffers(phba, cmp);
-free_bmp:
- if (bmp->virt)
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
- kfree(bmp);
-free_dd:
- kfree(dd_data);
-no_dd_data:
- /* make error code available to userspace */
- bsg_reply->result = rc;
- job->dd_data = NULL;
- return rc;
-}
-
static int
lpfc_forced_link_speed(struct bsg_job *job)
{
@@ -5877,10 +5525,6 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_MBOX:
rc = lpfc_bsg_mbox_cmd(job);
break;
- case LPFC_BSG_VENDOR_MENLO_CMD:
- case LPFC_BSG_VENDOR_MENLO_DATA:
- rc = lpfc_menlo_cmd(job);
- break;
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
@@ -6001,7 +5645,7 @@ lpfc_bsg_timeout(struct bsg_job *job)
spin_lock_irqsave(&phba->hbalock, flags);
/* make sure the I/O abort window is still open */
- if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+ if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return -EAGAIN;
}
@@ -6033,31 +5677,6 @@ lpfc_bsg_timeout(struct bsg_job *job)
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
- case TYPE_MENLO:
- /* Check to see if IOCB was issued to the port or not. If not,
- * remove it from the txq queue and call cancel iocbs.
- * Otherwise, call abort iotag.
- */
- cmdiocb = dd_data->context_un.menlo.cmdiocbq;
- spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
- list) {
- if (check_iocb == cmdiocb) {
- list_move_tail(&check_iocb->list, &completions);
- break;
- }
- }
- if (list_empty(&completions))
- lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
- spin_unlock_irqrestore(&phba->hbalock, flags);
- if (!list_empty(&completions)) {
- lpfc_sli_cancel_iocbs(phba, &completions,
- IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
- }
- break;
default:
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 749d6c43cfce..3c04ca2d7455 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -33,8 +33,6 @@
#define LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK 5
#define LPFC_BSG_VENDOR_GET_MGMT_REV 6
#define LPFC_BSG_VENDOR_MBOX 7
-#define LPFC_BSG_VENDOR_MENLO_CMD 8
-#define LPFC_BSG_VENDOR_MENLO_DATA 9
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
@@ -131,16 +129,6 @@ struct dfc_mbox_req {
uint32_t extSeqNum;
};
-/* Used for menlo command or menlo data. The xri is only used for menlo data */
-struct menlo_command {
- uint32_t cmd;
- uint32_t xri;
-};
-
-struct menlo_response {
- uint32_t xri; /* return the xri of the iocb exchange */
-};
-
/*
* macros and data structures for handling sli-config mailbox command
* pass-through support, this header file is shared between user and
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 89e36bf14d8f..d2d207791056 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -32,7 +32,9 @@ int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
-
+int lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox);
+void lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+ enum lpfc_mbox_ctx locked);
void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *);
void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -76,6 +78,7 @@ int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
void lpfc_free_iocb_list(struct lpfc_hba *phba);
int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, int count, int idx);
+int lpfc_read_lds_params(struct lpfc_hba *phba);
uint32_t lpfc_calc_cmf_latency(struct lpfc_hba *phba);
void lpfc_cmf_signal_init(struct lpfc_hba *phba);
void lpfc_cmf_start(struct lpfc_hba *phba);
@@ -90,6 +93,14 @@ void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
void lpfc_unblock_requests(struct lpfc_hba *phba);
void lpfc_block_requests(struct lpfc_hba *phba);
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries);
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor);
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry);
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries);
void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -129,6 +140,7 @@ void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
void lpfc_cleanup(struct lpfc_vport *);
+void lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd);
void lpfc_disc_timeout(struct timer_list *);
int lpfc_unregister_fcf_prep(struct lpfc_hba *);
@@ -190,6 +202,7 @@ void lpfc_els_timeout_handler(struct lpfc_vport *);
struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
uint8_t, struct lpfc_nodelist *,
uint32_t, uint32_t);
+void lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job);
void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -211,7 +224,7 @@ int lpfc_sli4_refresh_params(struct lpfc_hba *phba);
int lpfc_hba_down_prep(struct lpfc_hba *);
int lpfc_hba_down_post(struct lpfc_hba *);
void lpfc_hba_init(struct lpfc_hba *, uint32_t *);
-int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
+int lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
@@ -351,6 +364,22 @@ int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe);
int lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb, void *cmpl);
+void lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp);
+void lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry,
+ u8 tmo);
+void lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq,
+ u8 cr_cx_cmd);
+void lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
+ bool ia, bool wqec);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);
@@ -400,8 +429,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
-void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
- struct lpfc_wcqe_complete *w);
void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -414,6 +441,7 @@ void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+void lpfc_setup_fdmi_mask(struct lpfc_vport *vport);
int lpfc_link_reset(struct lpfc_vport *vport);
/* Function prototypes. */
@@ -435,6 +463,7 @@ extern const struct attribute_group *lpfc_hba_groups[];
extern const struct attribute_group *lpfc_vport_groups[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_nvme;
+extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
@@ -609,7 +638,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *abts_cmpl);
+ struct lpfc_iocbq *rspiocb);
void lpfc_create_multixri_pools(struct lpfc_hba *phba);
void lpfc_create_destroy_pools(struct lpfc_hba *phba);
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);
@@ -650,5 +679,11 @@ int lpfc_vmid_cmd(struct lpfc_vport *vport,
int lpfc_vmid_hash_fn(const char *vmid, int len);
struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
uint32_t hash, uint8_t *buf);
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+
+void lpfc_sli_rpi_release(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index dfcb7d4bd7fa..e941a99aa965 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -87,12 +87,12 @@ lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0146 Ignoring unsolicited CT No HBQ "
"status = x%x\n",
- piocbq->iocb.ulpStatus);
+ get_job_ulpstatus(phba, piocbq));
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "0145 Ignoring unsolicted CT HBQ Size:%d "
+ "0145 Ignoring unsolicited CT HBQ Size:%d "
"status = x%x\n",
- size, piocbq->iocb.ulpStatus);
+ size, get_job_ulpstatus(phba, piocbq));
}
static void
@@ -118,22 +118,22 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *mp, *bmp;
- ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ ndlp = cmdiocb->ndlp;
if (ndlp)
lpfc_nlp_put(ndlp);
- mp = cmdiocb->context2;
- bmp = cmdiocb->context3;
+ mp = cmdiocb->rsp_dmabuf;
+ bmp = cmdiocb->bpl_dmabuf;
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- cmdiocb->context2 = NULL;
+ cmdiocb->rsp_dmabuf = NULL;
}
if (bmp) {
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
kfree(bmp);
- cmdiocb->context3 = NULL;
+ cmdiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -143,7 +143,7 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
* @ndlp: pointer to a node-list data structure.
* @ct_req: pointer to the CT request data structure.
- * @rx_id: rx_id of the received UNSOL CT command
+ * @ulp_context: context of received UNSOL CT command
* @ox_id: ox_id of the UNSOL CT command
*
* This routine is invoked by the lpfc_ct_handle_mibreq routine for sending
@@ -152,7 +152,7 @@ lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
static void
lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
struct lpfc_sli_ct_request *ct_req,
- u16 rx_id, u16 ox_id)
+ u16 ulp_context, u16 ox_id)
{
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
@@ -161,8 +161,8 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
struct lpfc_dmabuf *bmp = NULL;
struct lpfc_dmabuf *mp = NULL;
struct ulp_bde64 *bpl;
- IOCB_t *icmd;
u8 rc = 0;
+ u32 tmo;
/* fill in BDEs for command */
mp = kmalloc(sizeof(*mp), GFP_KERNEL);
@@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
memset(bpl, 0, sizeof(struct ulp_bde64));
bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
- bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
bpl->tus.w = le32_to_cpu(bpl->tus.w);
@@ -220,43 +220,40 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
goto ct_free_bmpvirt;
}
- icmd = &cmdiocbq->iocb;
- icmd->un.genreq64.bdl.ulpIoTag32 = 0;
- icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.genreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
- icmd->un.genreq64.w5.hcsw.Fctl = (LS | LA);
- icmd->un.genreq64.w5.hcsw.Dfctl = 0;
- icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
- icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
- icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
+ ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_WQE);
+ } else {
+ lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
+ FC_RCTL_DD_SOL_CTL, 1,
+ CMD_XMIT_SEQUENCE64_CX);
+ }
/* Save for completion so we can release these resources */
- cmdiocbq->context1 = lpfc_nlp_get(ndlp);
- cmdiocbq->context2 = (uint8_t *)mp;
- cmdiocbq->context3 = (uint8_t *)bmp;
- cmdiocbq->iocb_cmpl = lpfc_ct_unsol_cmpl;
- icmd->ulpContext = rx_id; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = ox_id;
- icmd->un.ulpWord[3] =
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- icmd->ulpTimeout = (3 * phba->fc_ratov);
+ cmdiocbq->rsp_dmabuf = mp;
+ cmdiocbq->bpl_dmabuf = bmp;
+ cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
+ tmo = (3 * phba->fc_ratov);
cmdiocbq->retry = 0;
cmdiocbq->vport = vport;
- cmdiocbq->context_un.ndlp = NULL;
- cmdiocbq->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
+
+ cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
+ if (!cmdiocbq->ndlp)
+ goto ct_no_ndlp;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
- if (!rc)
- return;
+ if (rc) {
+ lpfc_nlp_put(ndlp);
+ goto ct_no_ndlp;
+ }
+ return;
+ct_no_ndlp:
rc = 6;
- lpfc_nlp_put(ndlp);
lpfc_sli_release_iocbq(phba, cmdiocbq);
ct_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
@@ -286,25 +283,17 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
{
struct lpfc_sli_ct_request *ct_req;
struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_vport *vport = NULL;
- IOCB_t *icmd = &ctiocbq->iocb;
- u32 mi_cmd, vpi;
- u32 did = 0;
-
- vpi = ctiocbq->iocb.unsli3.rcvsli3.vpi;
- vport = lpfc_find_vport_by_vpid(phba, vpi);
- if (!vport) {
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "6437 Unsol CT: VPORT NULL vpi : x%x\n",
- vpi);
- return;
- }
-
- did = ctiocbq->iocb.un.rcvels.remoteID;
- if (icmd->ulpStatus) {
+ struct lpfc_vport *vport = ctiocbq->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, ctiocbq);
+ u32 ulp_word4 = get_job_word4(phba, ctiocbq);
+ u32 did;
+ u32 mi_cmd;
+
+ did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6438 Unsol CT: status:x%x/x%x did : x%x\n",
- icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
return;
}
@@ -320,15 +309,16 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
return;
}
- ct_req = ((struct lpfc_sli_ct_request *)
- (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+ ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
mi_cmd = ct_req->CommandResponse.bits.CmdRsp;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
lpfc_ct_reject_event(ndlp, ct_req,
- ctiocbq->iocb.ulpContext,
- ctiocbq->iocb.unsli3.rcvsli3.ox_id);
+ bf_get(wqe_ctxt_tag,
+ &ctiocbq->wqe.xmit_els_rsp.wqe_com),
+ bf_get(wqe_rcvoxid,
+ &ctiocbq->wqe.xmit_els_rsp.wqe_com));
}
/**
@@ -351,55 +341,51 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *icmd = &ctiocbq->iocb;
int i;
struct lpfc_iocbq *iocbq;
+ struct lpfc_iocbq *iocb;
dma_addr_t dma_addr;
uint32_t size;
struct list_head head;
struct lpfc_sli_ct_request *ct_req;
- struct lpfc_dmabuf *bdeBuf1 = ctiocbq->context2;
- struct lpfc_dmabuf *bdeBuf2 = ctiocbq->context3;
-
- ctiocbq->context1 = NULL;
- ctiocbq->context2 = NULL;
- ctiocbq->context3 = NULL;
+ struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
+ u32 status, parameter, bde_count = 0;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
+
+ ctiocbq->cmd_dmabuf = NULL;
+ ctiocbq->rsp_dmabuf = NULL;
+ ctiocbq->bpl_dmabuf = NULL;
+
+ wcqe_cmpl = &ctiocbq->wcqe_cmpl;
+ status = get_job_ulpstatus(phba, ctiocbq);
+ parameter = get_job_word4(phba, ctiocbq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = wcqe_cmpl->word3;
+ else
+ bde_count = icmd->ulpBdeCount;
- if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
+ if (unlikely(status == IOSTAT_NEED_BUFFER)) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ } else if ((status == IOSTAT_LOCAL_REJECT) &&
+ ((parameter & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING)) {
/* Not enough posted buffers; Try posting more buffers */
phba->fc_stat.NoRcvBuf++;
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 2);
+ lpfc_sli3_post_buffer(phba, pring, 2);
return;
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
- if (icmd->ulpBdeCount == 0)
+ if (bde_count == 0)
return;
- if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- ctiocbq->context2 = bdeBuf1;
- if (icmd->ulpBdeCount == 2)
- ctiocbq->context3 = bdeBuf2;
- } else {
- dma_addr = getPaddr(icmd->un.cont64[0].addrHigh,
- icmd->un.cont64[0].addrLow);
- ctiocbq->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
- dma_addr);
- if (icmd->ulpBdeCount == 2) {
- dma_addr = getPaddr(icmd->un.cont64[1].addrHigh,
- icmd->un.cont64[1].addrLow);
- ctiocbq->context3 = lpfc_sli_ringpostbuf_get(phba,
- pring,
- dma_addr);
- }
- }
+ ctiocbq->cmd_dmabuf = bdeBuf1;
+ if (bde_count == 2)
+ ctiocbq->bpl_dmabuf = bdeBuf2;
- ct_req = ((struct lpfc_sli_ct_request *)
- (((struct lpfc_dmabuf *)ctiocbq->context2)->virt));
+ ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
@@ -412,19 +398,29 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
INIT_LIST_HEAD(&head);
list_add_tail(&head, &ctiocbq->list);
- list_for_each_entry(iocbq, &head, list) {
- icmd = &iocbq->iocb;
- if (icmd->ulpBdeCount == 0)
+ list_for_each_entry(iocb, &head, list) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = iocb->wcqe_cmpl.word3;
+ else
+ bde_count = iocb->iocb.ulpBdeCount;
+
+ if (!bde_count)
continue;
- bdeBuf1 = iocbq->context2;
- iocbq->context2 = NULL;
- size = icmd->un.cont64[0].tus.f.bdeSize;
+ bdeBuf1 = iocb->cmd_dmabuf;
+ iocb->cmd_dmabuf = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
+ else
+ size = iocb->iocb.un.cont64[0].tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
lpfc_in_buf_free(phba, bdeBuf1);
- if (icmd->ulpBdeCount == 2) {
- bdeBuf2 = iocbq->context3;
- iocbq->context3 = NULL;
- size = icmd->unsli3.rcvsli3.bde2.tus.f.bdeSize;
+ if (bde_count == 2) {
+ bdeBuf2 = iocb->bpl_dmabuf;
+ iocb->bpl_dmabuf = NULL;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ size = iocb->unsol_rcv_len;
+ else
+ size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
size);
lpfc_in_buf_free(phba, bdeBuf2);
@@ -447,7 +443,7 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
lpfc_in_buf_free(phba, mp);
}
- lpfc_post_buffer(phba, pring, i);
+ lpfc_sli3_post_buffer(phba, pring, i);
}
list_del(&head);
}
@@ -551,24 +547,25 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
struct lpfc_dmabuf *buf_ptr;
- /* I/O job is complete so context is now invalid*/
- ctiocb->context_un.ndlp = NULL;
- if (ctiocb->context1) {
- buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
+ /* IOCBQ job structure gets cleaned during release. Just release
+ * the dma buffers here.
+ */
+ if (ctiocb->cmd_dmabuf) {
+ buf_ptr = ctiocb->cmd_dmabuf;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- ctiocb->context1 = NULL;
+ ctiocb->cmd_dmabuf = NULL;
}
- if (ctiocb->context2) {
- lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
- ctiocb->context2 = NULL;
+ if (ctiocb->rsp_dmabuf) {
+ lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
+ ctiocb->rsp_dmabuf = NULL;
}
- if (ctiocb->context3) {
- buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
+ if (ctiocb->bpl_dmabuf) {
+ buf_ptr = ctiocb->bpl_dmabuf;
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- ctiocb->context3 = NULL;
+ ctiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, ctiocb);
return 0;
@@ -588,15 +585,15 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
static int
lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
- void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *),
+ void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd;
struct lpfc_iocbq *geniocb;
int rc;
+ u16 ulp_context;
/* Allocate buffer for command iocb */
geniocb = lpfc_sli_get_iocbq(phba);
@@ -604,65 +601,45 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
if (geniocb == NULL)
return 1;
- icmd = &geniocb->iocb;
- icmd->un.genreq64.bdl.ulpIoTag32 = 0;
- icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
- icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
- icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ /* Update the num_entry bde count */
+ geniocb->num_bdes = num_entry;
- geniocb->context3 = (uint8_t *) bmp;
+ geniocb->bpl_dmabuf = bmp;
/* Save for completion so we can release these resources */
- geniocb->context1 = (uint8_t *) inp;
- geniocb->context2 = (uint8_t *) outp;
+ geniocb->cmd_dmabuf = inp;
+ geniocb->rsp_dmabuf = outp;
geniocb->event_tag = event_tag;
- /* Fill in payload, bp points to frame payload */
- icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
-
- /* Fill in rest of iocb */
- icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
- icmd->un.genreq64.w5.hcsw.Dfctl = 0;
- icmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
- icmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
-
if (!tmo) {
/* FC spec states we need 3 * ratov for CT requests */
tmo = (3 * phba->fc_ratov);
}
- icmd->ulpTimeout = tmo;
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- icmd->ulpContext = ndlp->nlp_rpi;
+
if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
+ else
+ ulp_context = ndlp->nlp_rpi;
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
- /* For GEN_REQUEST64_CR, use the RPI */
- icmd->ulpCt_h = 0;
- icmd->ulpCt_l = 0;
- }
+ lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo);
/* Issue GEN REQ IOCB for NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0119 Issue GEN REQ IOCB to NPORT x%x "
"Data: x%x x%x\n",
- ndlp->nlp_DID, icmd->ulpIoTag,
+ ndlp->nlp_DID, geniocb->iotag,
vport->port_state);
- geniocb->iocb_cmpl = cmpl;
- geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
+ geniocb->cmd_cmpl = cmpl;
+ geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
geniocb->vport = vport;
geniocb->retry = retry;
- geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
- if (!geniocb->context_un.ndlp)
+ geniocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!geniocb->ndlp)
goto out;
- rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
+ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
- geniocb->context_un.ndlp = NULL;
lpfc_nlp_put(ndlp);
goto out;
}
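
Annotation: lpfc_gen_req() no longer hand-builds the GEN_REQUEST64 IOCB fields; it computes a u16 ulp_context and hands everything to lpfc_sli_prep_gen_req(). On SLI4 the logical RPI is mapped through the adapter's rpi_ids[] table, while SLI3 programs the RPI directly. A hedged sketch of just that selection (field names mirror the diff; the surrounding types are simplified stand-ins):

#include <stdint.h>

#define LPFC_SLI_REV4 4

struct sli4_hba_sketch { uint16_t rpi_ids[64]; };
struct hba_sketch {
	uint32_t sli_rev;
	struct sli4_hba_sketch sli4_hba;
};
struct nodelist_sketch { uint16_t nlp_rpi; };

/* SLI4 indirects the RPI through rpi_ids[]; SLI3 uses it as-is */
static uint16_t select_ulp_context(const struct hba_sketch *phba,
				   const struct nodelist_sketch *ndlp)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	return ndlp->nlp_rpi;
}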
@@ -938,26 +915,26 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
int rc, type;
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ cmdiocb->rsp_iocb = rspiocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_FT cmpl: status:x%x/x%x rtry:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
+ ulp_status, ulp_word4, vport->fc_ns_retry);
/* Ignore response if link flipped after this request was made */
if (cmdiocb->event_tag != phba->fc_eventTag) {
@@ -981,11 +958,17 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0226 NS query failed due to link event\n");
+ "0226 NS query failed due to link event: "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "port_state x%x gidft_inp x%x\n",
+ ulp_status, ulp_word4, vport->fc_flag,
+ vport->port_state, vport->gidft_inp);
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
goto out;
}
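
Annotation: every completion handler in this file now reads status through get_job_ulpstatus()/get_job_word4() instead of dereferencing rspiocb->iocb directly. The accessors exist because SLI3 and SLI4 completions carry the status in different structures. A hedged sketch of the shape of such an accessor (the real lpfc implementation is introduced elsewhere in this series and may differ):

#include <stdint.h>

#define SLI_REV4 4

/* Simplified stand-ins for the two completion layouts */
struct iocb_sli3 { uint32_t ulpStatus; uint32_t ulpWord4; };
struct wcqe_sli4 { uint32_t status; uint32_t parameter; };

struct rsp_sketch {
	struct iocb_sli3 iocb;		/* SLI3 response IOCB */
	struct wcqe_sli4 wcqe_cmpl;	/* SLI4 work completion */
};

/* One accessor hides which layout the adapter produced */
static uint32_t get_status_sketch(uint32_t sli_rev,
				  const struct rsp_sketch *rsp)
{
	if (sli_rev == SLI_REV4)
		return rsp->wcqe_cmpl.status;
	return rsp->iocb.ulpStatus;
}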
@@ -1013,11 +996,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_unlock_irq(shost->host_lock);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ if (ulp_status != IOSTAT_LOCAL_REJECT ||
+ (ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
@@ -1040,7 +1023,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0257 GID_FT Query error: 0x%x 0x%x\n",
- irsp->ulpStatus, vport->fc_ns_retry);
+ ulp_status, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *) inp->virt;
@@ -1054,12 +1037,12 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTreq->un.gid.Fc4Type,
vport->num_disc_nodes,
vport->gidft_inp,
- irsp->un.genreq64.bdl.bdeSize);
+ get_job_data_placed(phba, rspiocb));
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
- (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
+ get_job_data_placed(phba, rspiocb));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
@@ -1154,26 +1137,26 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp;
struct lpfc_dmabuf *outp;
struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
int rc;
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *)cmdiocb->context1;
- outp = (struct lpfc_dmabuf *)cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ cmdiocb->rsp_iocb = rspiocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GID_PT cmpl: status:x%x/x%x rtry:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->fc_ns_retry);
/* Ignore response if link flipped after this request was made */
@@ -1198,11 +1181,17 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
goto out;
}
- if (lpfc_error_lost_link(irsp)) {
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "4166 NS query failed due to link event\n");
+ "4166 NS query failed due to link event: "
+ "ulp_status x%x ulp_word4 x%x fc_flag x%x "
+ "port_state x%x gidft_inp x%x\n",
+ ulp_status, ulp_word4, vport->fc_flag,
+ vport->port_state, vport->gidft_inp);
if (vport->fc_flag & FC_RSCN_MODE)
lpfc_els_flush_rscn(vport);
+ if (vport->gidft_inp)
+ vport->gidft_inp--;
goto out;
}
@@ -1230,11 +1219,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
spin_unlock_irq(shost->host_lock);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
- if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ if (ulp_status != IOSTAT_LOCAL_REJECT ||
+ (ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
@@ -1253,7 +1242,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"4103 GID_FT Query error: 0x%x 0x%x\n",
- irsp->ulpStatus, vport->fc_ns_retry);
+ ulp_status, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
CTreq = (struct lpfc_sli_ct_request *)inp->virt;
@@ -1267,12 +1256,12 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
CTreq->un.gid.Fc4Type,
vport->num_disc_nodes,
vport->gidft_inp,
- irsp->un.genreq64.bdl.bdeSize);
+ get_job_data_placed(phba, rspiocb));
lpfc_ns_rsp(vport,
outp,
CTreq->un.gid.Fc4Type,
- (uint32_t)(irsp->un.genreq64.bdl.bdeSize));
+ get_job_data_placed(phba, rspiocb));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
/* NameServer Rsp Error */
@@ -1367,20 +1356,21 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
int did, rc, retry;
uint8_t fbits;
struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFF_ID cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
/* Ignore response if link flipped after this request was made */
if (cmdiocb->event_tag != phba->fc_eventTag) {
@@ -1389,7 +1379,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto iocb_free;
}
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
@@ -1419,8 +1409,8 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
retry = 1;
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch ((irsp->un.ulpWord[4] &
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch ((ulp_word4 &
IOERR_PARAM_MASK)) {
case IOERR_NO_RESOURCES:
@@ -1446,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmdiocb->retry, did);
if (rc == 0) {
/* success */
- free_ndlp = cmdiocb->context_un.ndlp;
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
return;
@@ -1456,7 +1446,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0267 NameServer GFF Rsp "
"x%x Error (%d %d) Data: x%x x%x\n",
- did, irsp->ulpStatus, irsp->un.ulpWord[4],
+ did, ulp_status, ulp_word4,
vport->fc_flag, vport->fc_rscn_id_cnt);
}
@@ -1503,7 +1493,7 @@ out:
}
iocb_free:
- free_ndlp = cmdiocb->context_un.ndlp;
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
return;
@@ -1511,36 +1501,34 @@ iocb_free:
static void
lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
- struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTrsp;
int did;
struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_nodelist *ns_ndlp = NULL;
+ struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp;
uint32_t fc4_data_0, fc4_data_1;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
did = be32_to_cpu(did);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"GFT_ID cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], did);
+ ulp_status, ulp_word4, did);
/* Ignore response if link flipped after this request was made */
- if ((uint32_t) cmdiocb->event_tag != phba->fc_eventTag) {
+ if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"9046 Event tag mismatch. Ignoring NS rsp\n");
goto out;
}
- /* Preserve the nameserver node to release the reference. */
- ns_ndlp = cmdiocb->context_un.ndlp;
-
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
/* Good status, continue checking */
CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
@@ -1601,7 +1589,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
} else
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+ "3065 GFT_ID failed x%08x\n", ulp_status);
out:
lpfc_ct_free_iocb(phba, cmdiocb);
@@ -1615,22 +1603,22 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_dmabuf *inp;
struct lpfc_dmabuf *outp;
- IOCB_t *irsp;
struct lpfc_sli_ct_request *CTrsp;
struct lpfc_nodelist *ndlp;
int cmdcode, rc;
uint8_t retry;
uint32_t latt;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* First save ndlp, before we overwrite it */
- ndlp = cmdiocb->context_un.ndlp;
+ ndlp = cmdiocb->ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- inp = (struct lpfc_dmabuf *) cmdiocb->context1;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- irsp = &rspiocb->iocb;
+ inp = cmdiocb->cmd_dmabuf;
+ outp = cmdiocb->rsp_dmabuf;
cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
CommandResponse.bits.CmdRsp);
@@ -1638,28 +1626,28 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
latt = lpfc_els_chk_latt(vport);
- /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
+ /* RFT request completes status <ulp_status> CmdRsp <CmdRsp> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0209 CT Request completes, latt %d, "
- "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
- latt, irsp->ulpStatus,
+ "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n",
+ latt, ulp_status,
CTrsp->CommandResponse.bits.CmdRsp,
- cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
+ get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"CT cmd cmpl: status:x%x/x%x cmd:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
+ ulp_status, ulp_word4, cmdcode);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0268 NS cmd x%x Error (x%x x%x)\n",
- cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
+ cmdcode, ulp_status, ulp_word4);
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_SLI_DOWN) ||
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_SLI_ABORTED)))
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SLI_DOWN) ||
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_SLI_ABORTED)))
goto out;
retry = cmdiocb->retry;
@@ -1684,15 +1672,15 @@ static void
lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RFT_ID;
@@ -1705,14 +1693,14 @@ static void
lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1726,15 +1714,15 @@ static void
lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RSPN_ID;
@@ -1747,14 +1735,14 @@ static void
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
+ outp = cmdiocb->rsp_dmabuf;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
@@ -1780,15 +1768,15 @@ static void
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_vport *vport = cmdiocb->vport;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
struct lpfc_dmabuf *outp;
struct lpfc_sli_ct_request *CTrsp;
- outp = (struct lpfc_dmabuf *) cmdiocb->context2;
- CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
+ outp = cmdiocb->rsp_dmabuf;
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC))
vport->ct_flags |= FC_CT_RFF_ID;
@@ -1884,7 +1872,7 @@ lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
struct lpfc_dmabuf *mp;
uint32_t type;
- mp = cmdiocb->context1;
+ mp = cmdiocb->cmd_dmabuf;
if (mp == NULL)
return 0;
CtReq = (struct lpfc_sli_ct_request *)mp->virt;
@@ -2037,28 +2025,30 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_RFT_ID);
- CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
+ CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);
+
+ /* Register Application Services type if VMID is enabled. */
+ if (phba->cfg_vmid_app_header)
+ CtReq->un.rft.app_serv_reg =
+ cpu_to_be32(RFT_APP_SERV_REG);
/* Register FC4 FCP type if enabled. */
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
- CtReq->un.rft.fcpReg = 1;
+ CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
- /* Register NVME type if enabled. Defined LE and swapped.
- * rsvd[0] is used as word1 because of the hard-coded
- * word0 usage in the ct_request data structure.
- */
+ /* Register NVME type if enabled. */
if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
- CtReq->un.rft.rsvd[0] =
- cpu_to_be32(LPFC_FC4_TYPE_BITMASK);
+ CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
ptr = (uint32_t *)CtReq;
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "6433 Issue RFT (%s %s): %08x %08x %08x %08x "
- "%08x %08x %08x %08x\n",
- CtReq->un.rft.fcpReg ? "FCP" : " ",
- CtReq->un.rft.rsvd[0] ? "NVME" : " ",
+ "6433 Issue RFT (%s %s %s): %08x %08x %08x "
+ "%08x %08x %08x %08x %08x\n",
+ CtReq->un.rft.fcp_reg ? "FCP" : " ",
+ CtReq->un.rft.nvme_reg ? "NVME" : " ",
+ CtReq->un.rft.app_serv_reg ? "APPS" : " ",
*ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
*(ptr + 4), *(ptr + 5),
*(ptr + 6), *(ptr + 7));
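
Annotation: the RFT_ID build now sets named big-endian flag words (fcp_reg, nvme_reg, app_serv_reg) instead of poking a reserved word with a hard-coded bitmask. A hedged standalone sketch of the same conditional-registration pattern; the SK_* values and field layout here are stand-ins, not the RFT_*_REG definitions from the lpfc headers changed elsewhere in this series:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() as a stand-in for cpu_to_be32() */

/* Stand-in flag values only; the driver defines RFT_FCP_REG etc. */
#define SK_FCP_REG	0x00000100u
#define SK_NVME_REG	0x00000100u
#define SK_APPS_REG	0x00000001u

struct rft_sketch {
	uint32_t port_id;	/* big-endian on the wire */
	uint32_t fcp_reg;
	uint32_t nvme_reg;
	uint32_t app_serv_reg;
};

static void build_rft(struct rft_sketch *rft, uint32_t my_did,
		      int enable_fcp, int enable_nvme, int vmid_app_header)
{
	rft->port_id = htonl(my_did);
	if (enable_fcp)
		rft->fcp_reg = htonl(SK_FCP_REG);
	if (enable_nvme)
		rft->nvme_reg = htonl(SK_NVME_REG);
	if (vmid_app_header)
		rft->app_serv_reg = htonl(SK_APPS_REG);
}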
@@ -2175,6 +2165,41 @@ ns_cmd_exit:
}
/**
+ * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
+ * @phba: Pointer to HBA context object.
+ * @mask: Initial port attributes mask
+ *
+ * This function checks to see if any vports have deferred their FDMI RPRT.
+ * A vport's RPRT may be deferred if it is issued before the primary port's
+ * RHBA completes.
+ */
+static void
+lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
+{
+ struct lpfc_vport **vports;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ int i;
+
+ phba->hba_flag |= HBA_RHBA_CMPL;
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports) {
+ for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ vport = vports[i];
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ continue;
+ if (vport->ct_flags & FC_CT_RPRT_DEFER) {
+ vport->ct_flags &= ~FC_CT_RPRT_DEFER;
+ vport->fdmi_port_mask = mask;
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+}
+
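+/* Annotation: the deferral protocol added here has two halves. A vport
+ * that wants to send RPRT before the physical port's RHBA has completed
+ * sets FC_CT_RPRT_DEFER instead (see the SLI_MGMT_DPRT case further
+ * down), and lpfc_fdmi_rprt_defer() flushes those flags once RHBA
+ * completes. A minimal standalone sketch of that two-phase pattern,
+ * with stand-in state in place of the driver's flag words:
+ *
+ *	#include <stdbool.h>
+ *
+ *	struct vport_sketch { bool rprt_deferred; };
+ *
+ *	struct hba_sketch {
+ *		bool rhba_done;
+ *		struct vport_sketch *vports;
+ *		int nvports;
+ *	};
+ *
+ *	static void issue_rprt(struct vport_sketch *v) { }
+ *
+ *	// A vport asks to register its port attributes
+ *	static void try_rprt(struct hba_sketch *hba, struct vport_sketch *v)
+ *	{
+ *		if (hba->rhba_done)
+ *			issue_rprt(v);
+ *		else
+ *			v->rprt_deferred = true;  // FC_CT_RPRT_DEFER
+ *	}
+ *
+ *	// RHBA completion path (HBA_RHBA_CMPL)
+ *	static void flush_deferred_rprt(struct hba_sketch *hba)
+ *	{
+ *		hba->rhba_done = true;
+ *		for (int i = 0; i < hba->nvports; i++) {
+ *			if (hba->vports[i].rprt_deferred) {
+ *				hba->vports[i].rprt_deferred = false;
+ *				issue_rprt(&hba->vports[i]);
+ *			}
+ *		}
+ *	}
+ */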
+/**
* lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
* @phba: Pointer to HBA context object.
* @cmdiocb: Pointer to the command IOCBQ.
@@ -2188,26 +2213,27 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct lpfc_dmabuf *inp = cmdiocb->context1;
- struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *CTcmd = inp->virt;
struct lpfc_sli_ct_request *CTrsp = outp->virt;
uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
uint32_t latt, cmd, err;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
latt = lpfc_els_chk_latt(vport);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"FDMI cmpl: status:x%x/x%x latt:%d",
- irsp->ulpStatus, irsp->un.ulpWord[4], latt);
+ ulp_status, ulp_word4, latt);
- if (latt || irsp->ulpStatus) {
+ if (latt || ulp_status) {
/* Look for a retryable error */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ if (ulp_status == IOSTAT_LOCAL_REJECT) {
+ switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_SLI_ABORTED:
case IOERR_SLI_DOWN:
/* Driver aborted this IO. No retry as error
@@ -2237,12 +2263,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0229 FDMI cmd %04x failed, latt = %d "
- "ulpStatus: x%x, rid x%x\n",
- be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ "ulp_status: x%x, rid x%x\n",
+ be16_to_cpu(fdmi_cmd), latt, ulp_status,
+ ulp_word4);
}
- free_ndlp = cmdiocb->context_un.ndlp;
+ free_ndlp = cmdiocb->ndlp;
lpfc_ct_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
@@ -2254,15 +2280,19 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = be16_to_cpu(fdmi_cmd);
if (fdmi_rsp == cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) {
/* FDMI rsp failed */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
"0220 FDMI cmd failed FS_RJT Data: x%x", cmd);
/* Should we fallback to FDMI-2 / FDMI-1 ? */
switch (cmd) {
case SLI_MGMT_RHBA:
if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
- /* Fallback to FDMI-1 */
+ /* Fallback to FDMI-1 for HBA attributes */
vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
+
+ /* If HBA attributes fall back to FDMI1, port
+ * attributes should fall back to FDMI1 as well,
+ * for consistency.
+ */
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
/* Start over */
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
@@ -2270,6 +2300,11 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
case SLI_MGMT_RPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
+ }
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
/* Fallback to FDMI-1 */
vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
@@ -2290,9 +2325,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->link_flag &= ~LS_CT_VEN_RPA;
if (phba->cmf_active_mode == LPFC_CFG_OFF)
return;
- lpfc_printf_log(phba, KERN_ERR,
+ lpfc_printf_log(phba, KERN_WARNING,
LOG_DISCOVERY | LOG_ELS,
- "6460 VEN FDMI RPA failure\n");
+ "6460 VEN FDMI RPA RJT\n");
return;
}
if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
@@ -2319,6 +2354,9 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
switch (cmd) {
case SLI_MGMT_RHBA:
+ /* Check for any RPRTs deferred till after RHBA completes */
+ lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);
+
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
break;
@@ -2327,10 +2365,26 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case SLI_MGMT_DPRT:
- if (vport->port_type == LPFC_PHYSICAL_PORT)
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
- else
- lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ } else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
+
+ /* Only issue an RPRT for the vport if the RHBA
+ * for the physical port completes successfully.
+ * We may have to defer the RPRT accordingly.
+ */
+ if (phba->hba_flag & HBA_RHBA_CMPL) {
+ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY,
+ "6078 RPRT deferred\n");
+ vport->ct_flags |= FC_CT_RPRT_DEFER;
+ }
+ }
break;
case SLI_MGMT_RPA:
if (vport->port_type == LPFC_PHYSICAL_PORT &&
@@ -2345,7 +2399,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_DISCOVERY | LOG_CGN_MGMT,
"6210 Issue Vendor MI FDMI %x\n",
phba->sli4_hba.pc_sli4_params.mi_ver);
@@ -2414,6 +2469,9 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
phba->link_flag &= ~LS_CT_VEN_RPA;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
} else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
}
@@ -2435,939 +2493,625 @@ lpfc_fdmi_change_check(struct lpfc_vport *vport)
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
LPFC_FDMI_PORT_ATTR_num_disc);
} else {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return;
lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
LPFC_FDMI_PORT_ATTR_num_disc);
}
}
-/* Routines for all individual HBA attributes */
-static int
-lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_fdmi_attr_u32 *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ ae->value_u32 = cpu_to_be32(attrval);
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NODENAME);
return size;
}
-static int
-lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+
+static inline int
+lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_wwn *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWNs are assumed to be bytestreams - Big Endian presentation */
+ memcpy(ae->name, wwn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- /* This string MUST be consistent with other FC platforms
- * supported by Broadcom.
- */
- strncpy(ae->un.AttrString,
- "Emulex Corporation",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MANUFACTURER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
+ struct lpfc_name *wwnn, struct lpfc_name *wwpn)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fullwwn *ae = attr;
+ u8 *nname = ae->nname;
+ u8 *pname = ae->pname;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+ /* WWNs are assumed to be bytestreams - Big Endian presentation */
+ memcpy(nname, wwnn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
+ memcpy(pname, wwpn,
+ min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
- strncpy(ae->un.AttrString, phba->SerialNumber,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SERIAL_NUMBER);
return size;
}
-static int
-lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+static inline int
+lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_string *ae = attr;
+ int len, size;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ /*
+ * We trust the caller: if an FDMI string field is capped
+ * at 64 bytes, the caller passes in a string of 64 bytes
+ * or less.
+ */
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
+ strncpy(ae->value_string, attrstring, sizeof(ae->value_string));
+ len = strnlen(ae->value_string, sizeof(ae->value_string));
+ /* Round string length up to a 32-bit boundary; ensure a NUL terminator */
len += (len & 3) ? (4 - (len & 3)) : 4;
+ /* size is Type/Len (4 bytes) plus string length */
size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL);
+
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
return size;
}
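
Annotation: the length fix-up in lpfc_fdmi_set_attr_string() is easy to misread. It rounds up to a 32-bit boundary, and when the length is already a multiple of four it still adds four bytes, so the attribute always carries at least one NUL. A tiny standalone check of that arithmetic:

#include <assert.h>
#include <stddef.h>

static size_t fdmi_pad_len(size_t len)
{
	/* Same expression as the driver: unaligned lengths round up
	 * to the next multiple of 4, aligned lengths grow by a full
	 * word to make room for the NUL.
	 */
	len += (len & 3) ? (4 - (len & 3)) : 4;
	return len;
}

int main(void)
{
	assert(fdmi_pad_len(5) == 8);	/* 5 -> pad 3 */
	assert(fdmi_pad_len(6) == 8);	/* 6 -> pad 2 */
	assert(fdmi_pad_len(8) == 12);	/* aligned -> +4 for the NUL */
	return 0;
}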
-static int
-lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+/* Bitfields for FC4 Types that can be reported */
+#define ATTR_FC4_CT 0x00000001
+#define ATTR_FC4_FCP 0x00000002
+#define ATTR_FC4_NVME 0x00000004
+
+static inline int
+lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_fdmi_attr_fc4types *ae = attr;
+ int size = sizeof(*ae);
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ ae->type = cpu_to_be16(attrtype);
+ ae->len = cpu_to_be16(size);
+
+ if (typemask & ATTR_FC4_FCP)
+ ae->value_types[2] = 0x01; /* Type 0x8 - FCP */
+
+ if (typemask & ATTR_FC4_CT)
+ ae->value_types[7] = 0x01; /* Type 0x20 - CT */
+
+ if (typemask & ATTR_FC4_NVME)
+ ae->value_types[6] = 0x01; /* Type 0x28 - NVME */
- strncpy(ae->un.AttrString, phba->ModelDesc,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MODEL_DESCRIPTION);
return size;
}
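
Annotation: the fixed indices in lpfc_fdmi_set_attr_fc4types() (FCP at value_types[2], CT at [7], NVME at [6]) follow from treating the 32-byte FC-4 types field as eight big-endian 32-bit words, with FC-4 type T occupying bit (T mod 32) of word (T / 32). A standalone check of that mapping, assuming that interpretation of the layout:

#include <assert.h>

/* Byte/bit position of FC-4 type T in a 32-byte map stored as
 * eight big-endian 32-bit words (assumed layout).
 */
static void fc4_type_pos(unsigned int type, unsigned int *byte,
			 unsigned int *bit)
{
	*byte = (type / 32) * 4 + (3 - ((type % 32) / 8));
	*bit = type % 8;
}

int main(void)
{
	unsigned int byte, bit;

	fc4_type_pos(0x08, &byte, &bit);	/* FCP */
	assert(byte == 2 && bit == 0);		/* value_types[2] = 0x01 */
	fc4_type_pos(0x20, &byte, &bit);	/* CT */
	assert(byte == 7 && bit == 0);		/* value_types[7] = 0x01 */
	fc4_type_pos(0x28, &byte, &bit);	/* NVME */
	assert(byte == 6 && bit == 0);		/* value_types[6] = 0x01 */
	return 0;
}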
+/* Routines for all individual HBA attributes */
static int
-lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- lpfc_vpd_t *vp = &phba->vpd;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t i, j, incr, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- /* Convert JEDEC ID to ascii for hardware version */
- incr = vp->rev.biuRev;
- for (i = 0; i < 8; i++) {
- j = (incr & 0xf);
- if (j <= 9)
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x30 +
- (uint8_t) j);
- else
- ae->un.AttrString[7 - i] =
- (char)((uint8_t) 0x61 +
- (uint8_t) (j - 10));
- incr = (incr >> 4);
- }
- size = FOURBYTES + 8;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_HARDWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ /* This string MUST be consistent with other FC platforms
+ * supported by Broadcom.
+ */
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
+ "Emulex Corporation");
+}
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+static int
+lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
+{
+ struct lpfc_hba *phba = vport->phba;
- strncpy(ae->un.AttrString, lpfc_release_version,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_DRIVER_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
+ phba->SerialNumber);
}
static int
-lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- if (phba->sli_rev == LPFC_SLI_REV4)
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- else
- strncpy(ae->un.AttrString, phba->OptionROMVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OPTION_ROM_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FIRMWARE_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
+ phba->ModelDesc);
}
static int
-lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ lpfc_vpd_t *vp = &phba->vpd;
+ char buf[16] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
- init_utsname()->sysname,
- init_utsname()->release,
- init_utsname()->version);
+ return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
+}
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_OS_NAME_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
+ lpfc_release_version);
}
static int
-lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_decode_firmware_rev(phba, buf, 1);
- ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_MAX_CT_PAYLOAD_LEN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ buf);
+ }
+
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
+ phba->OptionROMVersion);
}
static int
-lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ struct lpfc_hba *phba = vport->phba;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_decode_firmware_rev(phba, buf, 1);
- len = lpfc_vport_symbolic_node_name(vport,
- ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_SYM_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
+ snprintf(buf, sizeof(buf), "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version);
- /* Nothing is defined for this currently */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_INFO);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
}
static int
-lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
-
- /* Each driver instance corresponds to a single port */
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_NUM_PORTS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
+ LPFC_MAX_CT_SIZE);
}
static int
-lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));
- memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_FABRIC_WWNN);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
}
static int
-lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
+}
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+static int
+lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
+{
+ /* Each driver instance corresponds to a single port */
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
+}
- strlcat(ae->un.AttrString, phba->BIOSVersion,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_VERSION);
- return size;
+static int
+lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
+{
+ return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
+ &vport->fabric_nodename);
}
static int
-lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct lpfc_hba *phba = vport->phba;
- ae = &ad->AttrValue;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
+ phba->BIOSVersion);
+}
+static int
+lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
+{
/* Driver doesn't have access to this information */
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_BIOS_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
}
static int
-lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "EMULEX",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RHBA_VENDOR_ID);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
}
-/* Routines for all individual PORT attributes */
+/*
+ * Routines for all individual PORT attributes
+ */
+
static int
-lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ u32 fc4types;
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if Firmware supports NVME and on physical port */
if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
phba->sli4_hba.pc_sli4_params.nvme)
- ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
+ struct lpfc_hba *phba = vport->phba;
+ u32 speeds = 0;
+ u32 tcfg;
+ u8 i, cnt;
- ae->un.AttrInt = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
- if (phba->lmt & LMT_256Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_256GFC;
- if (phba->lmt & LMT_128Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_128GFC;
- if (phba->lmt & LMT_64Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_64GFC;
- if (phba->lmt & LMT_32Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_32GFC;
- if (phba->lmt & LMT_16Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_16GFC;
- if (phba->lmt & LMT_10Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_10GFC;
- if (phba->lmt & LMT_8Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_8GFC;
- if (phba->lmt & LMT_4Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_4GFC;
- if (phba->lmt & LMT_2Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_2GFC;
- if (phba->lmt & LMT_1Gb)
- ae->un.AttrInt |= HBA_PORTSPEED_1GFC;
+ cnt = 0;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tcfg = phba->sli4_hba.conf_trunk;
+ for (i = 0; i < 4; i++, tcfg >>= 1)
+ if (tcfg & 1)
+ cnt++;
+ }
+
+ if (cnt > 2) { /* 4 lane trunk group */
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ } else if (cnt) { /* 2 lane trunk group */
+ if (phba->lmt & LMT_128Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_32GFC;
+ } else {
+ if (phba->lmt & LMT_256Gb)
+ speeds |= HBA_PORTSPEED_256GFC;
+ if (phba->lmt & LMT_128Gb)
+ speeds |= HBA_PORTSPEED_128GFC;
+ if (phba->lmt & LMT_64Gb)
+ speeds |= HBA_PORTSPEED_64GFC;
+ if (phba->lmt & LMT_32Gb)
+ speeds |= HBA_PORTSPEED_32GFC;
+ if (phba->lmt & LMT_16Gb)
+ speeds |= HBA_PORTSPEED_16GFC;
+ if (phba->lmt & LMT_10Gb)
+ speeds |= HBA_PORTSPEED_10GFC;
+ if (phba->lmt & LMT_8Gb)
+ speeds |= HBA_PORTSPEED_8GFC;
+ if (phba->lmt & LMT_4Gb)
+ speeds |= HBA_PORTSPEED_4GFC;
+ if (phba->lmt & LMT_2Gb)
+ speeds |= HBA_PORTSPEED_2GFC;
+ if (phba->lmt & LMT_1Gb)
+ speeds |= HBA_PORTSPEED_1GFC;
+ }
} else {
/* FCoE links support only one speed */
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
}
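
Annotation: the trunking logic above counts configured lanes in the low four bits of conf_trunk and then advertises aggregate speeds: a 4-lane group with 64G-capable links reports 256GFC, a 2-lane group reports 128GFC. A standalone sketch of the lane count and the scaling it implies:

#include <assert.h>
#include <stdint.h>

/* Count configured trunk lanes (low 4 bits of conf_trunk) */
static unsigned int trunk_lanes(uint32_t tcfg)
{
	unsigned int cnt = 0;

	for (int i = 0; i < 4; i++, tcfg >>= 1)
		cnt += tcfg & 1;
	return cnt;
}

int main(void)
{
	assert(trunk_lanes(0x0) == 0);	/* no trunking: per-link speeds */
	assert(trunk_lanes(0x3) == 2);	/* 2-lane group: speeds x2 */
	assert(trunk_lanes(0xf) == 4);	/* 4-lane group: speeds x4 */
	return 0;
}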
static int
-lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
+ u32 speeds = 0;
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
case LPFC_LINK_SPEED_1GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_1GFC;
+ speeds = HBA_PORTSPEED_1GFC;
break;
case LPFC_LINK_SPEED_2GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_2GFC;
+ speeds = HBA_PORTSPEED_2GFC;
break;
case LPFC_LINK_SPEED_4GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_4GFC;
+ speeds = HBA_PORTSPEED_4GFC;
break;
case LPFC_LINK_SPEED_8GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_8GFC;
+ speeds = HBA_PORTSPEED_8GFC;
break;
case LPFC_LINK_SPEED_10GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_10GFC;
+ speeds = HBA_PORTSPEED_10GFC;
break;
case LPFC_LINK_SPEED_16GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_16GFC;
+ speeds = HBA_PORTSPEED_16GFC;
break;
case LPFC_LINK_SPEED_32GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_32GFC;
+ speeds = HBA_PORTSPEED_32GFC;
break;
case LPFC_LINK_SPEED_64GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_64GFC;
+ speeds = HBA_PORTSPEED_64GFC;
break;
case LPFC_LINK_SPEED_128GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_128GFC;
+ speeds = HBA_PORTSPEED_128GFC;
break;
case LPFC_LINK_SPEED_256GHZ:
- ae->un.AttrInt = HBA_PORTSPEED_256GFC;
+ speeds = HBA_PORTSPEED_256GFC;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
} else {
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_10GE;
+ speeds = HBA_PORTSPEED_10GE;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_25GE;
+ speeds = HBA_PORTSPEED_25GE;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_40GE;
+ speeds = HBA_PORTSPEED_40GE;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
- ae->un.AttrInt = HBA_PORTSPEED_100GE;
+ speeds = HBA_PORTSPEED_100GE;
break;
default:
- ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN;
+ speeds = HBA_PORTSPEED_UNKNOWN;
break;
}
}
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_SPEED);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
}
static int
-lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
{
- struct serv_parm *hsp;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
+ struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;
- ae = &ad->AttrValue;
-
- hsp = (struct serv_parm *)&vport->fc_sparam;
- ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
- (uint32_t) hsp->cmn.bbRcvSizeLsb;
- ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_MAX_FRAME_SIZE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
+ (((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
+ (uint32_t)hsp->cmn.bbRcvSizeLsb);
}
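
Annotation: RPRT_MAX_FRAME_SIZE packs a 12-bit buffer-to-buffer receive size split across two service-parameter bytes; the helper call above reassembles it. A quick standalone check (2112, a common FC receive size, encodes as MSB 0x08 / LSB 0x40):

#include <assert.h>
#include <stdint.h>

/* Reassemble the 12-bit receive size from the common service
 * parameter bytes, exactly as the attribute routine does.
 */
static uint32_t bb_rcv_size(uint8_t msb, uint8_t lsb)
{
	return (((uint32_t)msb & 0x0F) << 8) | lsb;
}

int main(void)
{
	assert(bb_rcv_size(0x08, 0x40) == 2112);
	assert(bb_rcv_size(0x08, 0x00) == 2048);
	return 0;
}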
static int
-lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
+ shost->host_no);
- snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
- "/sys/class/scsi_host/host%d", shost->host_no);
- len = strnlen((char *)ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_OS_DEVICE_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[64] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);
- scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
- vport->phba->os_host_name);
-
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_HOST_NAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
}
static int
-lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_NODENAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
+ &vport->fc_sparam.nodeName);
}
static int
-lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
+ char buf[256] = { 0 };
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));
- len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SYM_PORTNAME);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
}
static int
-lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
- ae = &ad->AttrValue;
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
- else
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NPORT);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_TYPE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
+ (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
+ LPFC_FDMI_PORTTYPE_NLPORT :
+ LPFC_FDMI_PORTTYPE_NPORT);
}
static int
-lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_CLASS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
+ FC_COS_CLASS2 | FC_COS_CLASS3);
}
static int
-lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
- sizeof(struct lpfc_name));
- size = FOURBYTES + sizeof(struct lpfc_name);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_FABRICNAME);
- return size;
+ return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
+ &vport->fabric_portname);
}
static int
-lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
+ struct lpfc_hba *phba = vport->phba;
+ u32 fc4types;
- ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
- ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
+ fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
/* Check to see if NVME is configured or not */
- if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
- ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
+ if (vport == phba->pport &&
+ phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ fc4types |= ATTR_FC4_NVME;
- size = FOURBYTES + 32;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
- return size;
+ return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
+ fc4types);
}
static int
-lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- /* Link Up - operational */
- ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_STATE);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
+ LPFC_FDMI_PORTSTATE_ONLINE);
}
static int
-lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
vport->fdmi_num_disc = lpfc_find_map_node(vport);
- ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_DISC_PORT);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
+ vport->fdmi_num_disc);
}
static int
-lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_PORT_ID);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
}
static int
-lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "Smart SAN Initiator",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SERVICE);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
+ "Smart SAN Initiator");
}
static int
-lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
- sizeof(struct lpfc_name));
- memcpy((((uint8_t *)&ae->un.AttrString) +
- sizeof(struct lpfc_name)),
- &vport->fc_sparam.portName, sizeof(struct lpfc_name));
- size = FOURBYTES + (2 * sizeof(struct lpfc_name));
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_GUID);
- return size;
+ return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
+ &vport->fc_sparam.nodeName,
+ &vport->fc_sparam.portName);
}
static int
-lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
-
- strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString,
- sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_VERSION);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
+ "Smart SAN Version 2.0");
}
static int
-lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
-
- ae = &ad->AttrValue;
- memset(ae, 0, sizeof(*ae));
- strncpy(ae->un.AttrString, phba->ModelName,
- sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_MODEL);
- return size;
+ return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
+ phba->ModelName);
}
static int
-lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
-
/* SRIOV (type 3) is not supported */
- if (vport->vpi)
- ae->un.AttrInt = cpu_to_be32(2); /* NPIV */
- else
- ae->un.AttrInt = cpu_to_be32(1); /* Physical */
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_PORT_INFO);
- return size;
+
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
+ (vport->vpi) ? 2 /* NPIV */ : 1 /* Physical */);
}
static int
-lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(0);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_QOS);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
}
static int
-lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
{
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t size;
-
- ae = &ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(1);
- size = FOURBYTES + sizeof(uint32_t);
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
- return size;
+ return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
}
static int
-lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport,
- struct lpfc_fdmi_attr_def *ad)
+lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_fdmi_attr_entry *ae;
- uint32_t len, size;
- char mibrevision[16];
-
- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- memset(ae, 0, 256);
- sprintf(mibrevision, "ELXE2EM:%04d",
- phba->sli4_hba.pc_sli4_params.mi_ver);
- strncpy(ae->un.AttrString, &mibrevision[0], sizeof(ae->un.AttrString));
- len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
- len += (len & 3) ? (4 - (len & 3)) : 4;
- size = FOURBYTES + len;
- ad->AttrLen = cpu_to_be16(size);
- ad->AttrType = cpu_to_be16(RPRT_VENDOR_MI);
- return size;
+ char buf[32] = { 0 };
+
+ sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);
+
+ return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
}
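Every attribute routine above now collapses into one call to a typed set-helper. A minimal sketch of the u32 variant, assuming the TLV layout implied by the callers (the real helpers are defined earlier in lpfc_ct.c and may differ in detail):
	static int lpfc_fdmi_set_attr_u32(void *attr, u16 attrtype, u32 attrval)
	{
		struct lpfc_fdmi_attr_u32 *ae = attr;	/* assumed: type, len, value */
		int size = sizeof(*ae);

		ae->type = cpu_to_be16(attrtype);
		ae->len = cpu_to_be16(size);	/* length includes the 4-byte header */
		ae->value_u32 = cpu_to_be32(attrval);

		return size;	/* caller adds the consumed bytes to its running size */
	}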
/* RHBA attribute jump table */
int (*lpfc_fdmi_hba_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */
lpfc_fdmi_hba_attr_manufacturer, /* bit1 RHBA_MANUFACTURER */
@@ -3391,7 +3135,7 @@ int (*lpfc_fdmi_hba_action[])
/* RPA / RPRT attribute jump table */
int (*lpfc_fdmi_port_action[])
- (struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) = {
+ (struct lpfc_vport *vport, void *attrbuf) = {
/* Action routine Mask bit Attribute type */
lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */
lpfc_fdmi_port_attr_support_speed, /* bit1 RPRT_SUPPORTED_SPEED */
@@ -3433,20 +3177,20 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int cmdcode, uint32_t new_mask)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_dmabuf *mp, *bmp;
+ struct lpfc_dmabuf *rq, *rsp;
struct lpfc_sli_ct_request *CtReq;
- struct ulp_bde64 *bpl;
+ struct ulp_bde64_le *bde;
uint32_t bit_pos;
- uint32_t size;
+ uint32_t size, addsz;
uint32_t rsp_size;
uint32_t mask;
struct lpfc_fdmi_reg_hba *rh;
struct lpfc_fdmi_port_entry *pe;
- struct lpfc_fdmi_reg_portattr *pab = NULL;
+ struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
struct lpfc_fdmi_attr_block *ab = NULL;
- int (*func)(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad);
- void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
+ int (*func)(struct lpfc_vport *vport, void *attrbuf);
+ void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb);
if (!ndlp)
return 0;
@@ -3455,34 +3199,39 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp)
+ rq = kmalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
goto fdmi_cmd_exit;
- mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
- if (!mp->virt)
- goto fdmi_cmd_free_mp;
+ rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
+ if (!rq->virt)
+ goto fdmi_cmd_free_rq;
/* Allocate buffer for Buffer ptr list */
- bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!bmp)
- goto fdmi_cmd_free_mpvirt;
+ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+ if (!rsp)
+ goto fdmi_cmd_free_rqvirt;
- bmp->virt = lpfc_mbuf_alloc(phba, 0, &(bmp->phys));
- if (!bmp->virt)
- goto fdmi_cmd_free_bmp;
+ rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
+ if (!rsp->virt)
+ goto fdmi_cmd_free_rsp;
- INIT_LIST_HEAD(&mp->list);
- INIT_LIST_HEAD(&bmp->list);
+ INIT_LIST_HEAD(&rq->list);
+ INIT_LIST_HEAD(&rsp->list);
+
+ /* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
+ memset(rq->virt, 0, LPFC_BPL_SIZE);
+ rsp_size = LPFC_BPL_SIZE;
/* FDMI request */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0218 FDMI Request Data: x%x x%x x%x\n",
- vport->fc_flag, vport->port_state, cmdcode);
- CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n",
+ cmdcode, new_mask, vport->fdmi_port_mask,
+ vport->fc_flag, vport->port_state);
+
+ CtReq = (struct lpfc_sli_ct_request *)rq->virt;
/* First populate the CT_IU preamble */
- memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
CtReq->RevisionId.bits.InId = 0;
@@ -3490,17 +3239,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
- rsp_size = LPFC_BPL_SIZE;
+
size = 0;
/* Next fill in the specific FDMI cmd information */
switch (cmdcode) {
case SLI_MGMT_RHAT:
case SLI_MGMT_RHBA:
- rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un.PortID;
+ rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
/* HBA Identifier */
memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_fdmi_hba_ident);
if (cmdcode == SLI_MGMT_RHBA) {
/* Registered Port List */
@@ -3509,16 +3259,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(&rh->rpl.pe.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
-
- /* point to the HBA attribute block */
- size = 2 * sizeof(struct lpfc_name) +
- FOURBYTES;
- } else {
- size = sizeof(struct lpfc_name);
+ size += sizeof(struct lpfc_fdmi_reg_port_list);
}
+
ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
ab->EntryCnt = 0;
- size += FOURBYTES;
+ size += FOURBYTES; /* add length of EntryCnt field */
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3529,11 +3276,13 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_hba_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)rh + size));
- ab->EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)rh + size));
+ if (addsz) {
+ ab->EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto hba_out;
}
@@ -3543,27 +3292,40 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
hba_out:
ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
/* Total size */
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_RPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return 0;
+ }
+ fallthrough;
case SLI_MGMT_RPA:
- pab = (struct lpfc_fdmi_reg_portattr *)&CtReq->un.PortID;
+ /* Store base ptr right after preamble */
+ base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un;
+
if (cmdcode == SLI_MGMT_RPRT) {
- rh = (struct lpfc_fdmi_reg_hba *)pab;
+ rh = (struct lpfc_fdmi_reg_hba *)base;
/* HBA Identifier */
memcpy(&rh->hi.PortName,
&phba->pport->fc_sparam.portName,
sizeof(struct lpfc_name));
pab = (struct lpfc_fdmi_reg_portattr *)
- ((uint8_t *)pab + sizeof(struct lpfc_name));
+ ((uint8_t *)base + sizeof(struct lpfc_name));
+ size += sizeof(struct lpfc_name);
+ } else {
+ pab = base;
}
memcpy((uint8_t *)&pab->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
- size += sizeof(struct lpfc_name) + FOURBYTES;
pab->ab.EntryCnt = 0;
+ /* add length of name and EntryCnt field */
+ size += sizeof(struct lpfc_name) + FOURBYTES;
+
bit_pos = 0;
if (new_mask)
mask = new_mask;
@@ -3574,11 +3336,13 @@ hba_out:
while (mask) {
if (mask & 0x1) {
func = lpfc_fdmi_port_action[bit_pos];
- size += func(vport,
- (struct lpfc_fdmi_attr_def *)
- ((uint8_t *)pab + size));
- pab->ab.EntryCnt++;
- if ((size + 256) >
+ addsz = func(vport, ((uint8_t *)base + size));
+ if (addsz) {
+ pab->ab.EntryCnt++;
+ size += addsz;
+ }
+ /* check if another attribute fits */
+ if ((size + FDMI_MAX_ATTRLEN) >
(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
goto port_out;
}
@@ -3587,10 +3351,7 @@ hba_out:
}
port_out:
pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
- /* Total size */
- if (cmdcode == SLI_MGMT_RPRT)
- size += sizeof(struct lpfc_name);
- size = GID_REQUEST_SZ - 4 + size;
+ size += GID_REQUEST_SZ - 4;
break;
case SLI_MGMT_GHAT:
@@ -3599,7 +3360,7 @@ port_out:
fallthrough;
case SLI_MGMT_DHBA:
case SLI_MGMT_DHAT:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3611,8 +3372,14 @@ port_out:
rsp_size = FC_MAX_NS_RSP;
fallthrough;
case SLI_MGMT_DPRT:
+ if (vport->port_type != LPFC_PHYSICAL_PORT) {
+ ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
+ if (!ndlp)
+ return 0;
+ }
+ fallthrough;
case SLI_MGMT_DPA:
- pe = (struct lpfc_fdmi_port_entry *)&CtReq->un.PortID;
+ pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
memcpy((uint8_t *)&pe->PortName,
(uint8_t *)&vport->fc_sparam.portName,
sizeof(struct lpfc_name));
@@ -3625,31 +3392,32 @@ port_out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0298 FDMI cmdcode x%x not supported\n",
cmdcode);
- goto fdmi_cmd_free_bmpvirt;
+ goto fdmi_cmd_free_rspvirt;
}
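Both attribute loops above guard the shared mbuf the same way; the bound, sketched:
	/*
	 * After emitting an attribute, continue only while
	 *	size + FDMI_MAX_ATTRLEN <= LPFC_BPL_SIZE - LPFC_CT_PREAMBLE
	 * i.e. a worst-case attribute must still fit behind the CT preamble in
	 * the 1K DMA buffer; otherwise jump to hba_out/port_out and send the
	 * registration truncated rather than overrun rq->virt.
	 */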
CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
- bpl = (struct ulp_bde64 *)bmp->virt;
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
- bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.f.bdeSize = size;
+ bde = (struct ulp_bde64_le *)rsp->virt;
+ bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
+ bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
+ bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
+ ULP_BDE64_TYPE_SHIFT);
+ bde->type_size |= cpu_to_le32(size);
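A layout note on the new descriptor, assuming the lpfc_hw4.h definition of struct ulp_bde64_le:
	/*
	 * type_size packs the BDE type into bits 31:24 and the buffer length
	 * into bits 23:0, stored little-endian, replacing the old host-order
	 * ulp_bde64 tus.f bitfields and their le32_to_cpu() round trip.
	 */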
/*
* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
* to hold ndlp reference for the corresponding callback function.
*/
- if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, 0))
+ if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
return 0;
-fdmi_cmd_free_bmpvirt:
- lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-fdmi_cmd_free_bmp:
- kfree(bmp);
-fdmi_cmd_free_mpvirt:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
-fdmi_cmd_free_mp:
- kfree(mp);
+fdmi_cmd_free_rspvirt:
+ lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
+fdmi_cmd_free_rsp:
+ kfree(rsp);
+fdmi_cmd_free_rqvirt:
+ lpfc_mbuf_free(phba, rq->virt, rq->phys);
+fdmi_cmd_free_rq:
+ kfree(rq);
fdmi_cmd_exit:
/* Issue FDMI request failed */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -3798,12 +3566,13 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- struct lpfc_dmabuf *inp = cmdiocb->context1;
- struct lpfc_dmabuf *outp = cmdiocb->context2;
+ struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
struct lpfc_sli_ct_request *ctcmd = inp->virt;
struct lpfc_sli_ct_request *ctrsp = outp->virt;
u16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
struct app_id_object *app;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
u32 cmd, hash, bucket;
struct lpfc_vmid *vmp, *cur;
u8 *data = outp->virt;
@@ -3813,9 +3582,9 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (cmd == SLI_CTAS_DALLAPP_ID)
lpfc_ct_free_iocb(phba, cmdiocb);
- if (lpfc_els_chk_latt(vport) || rspiocb->iocb.ulpStatus) {
+ if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
if (cmd != SLI_CTAS_DALLAPP_ID)
- return;
+ goto free_res;
}
/* Check for a CT LS_RJT response */
if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
@@ -3830,7 +3599,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* If DALLAPP_ID failed retry later */
if (cmd == SLI_CTAS_DALLAPP_ID)
vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
- return;
+ goto free_res;
}
}
@@ -3844,7 +3613,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
app->obj.entity_id_len);
if (app->obj.entity_id_len == 0 || app->port_id == 0)
- return;
+ goto free_res;
hash = lpfc_vmid_hash_fn(app->obj.entity_id,
app->obj.entity_id_len);
@@ -3891,6 +3660,9 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
"8857 Invalid command code\n");
}
+free_res:
+ lpfc_ct_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
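The completion now funnels every exit through free_res so the iocb and the ndlp reference taken at submit time are dropped exactly once. The idiom, as a minimal sketch (the failure flag is hypothetical):
	static void lpfc_ct_cmpl_sketch(struct lpfc_hba *phba,
					struct lpfc_iocbq *cmdiocb, bool failed)
	{
		struct lpfc_nodelist *ndlp = cmdiocb->ndlp;

		if (failed)	/* hypothetical early-exit condition */
			goto free_res;

		/* ...normal response processing... */

	free_res:
		lpfc_ct_free_iocb(phba, cmdiocb);	/* releases cmd/rsp buffers and iocb */
		lpfc_nlp_put(ndlp);			/* drop the submit-time node reference */
	}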
/**
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 08b2e85dcd7d..f5252e45a48a 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -2607,8 +2607,8 @@ lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf,
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_multixri_pool *multixri_pool;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2688,8 +2688,8 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
if (!phba->targetport)
return -ENXIO;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2826,8 +2826,8 @@ lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 63)
- nbytes = 63;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
@@ -3060,8 +3060,8 @@ lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
char *pbuf;
int i;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > sizeof(mybuf) - 1)
+ nbytes = sizeof(mybuf) - 1;
memset(mybuf, 0, sizeof(mybuf));
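The same clamp now appears in each writer above; its point is the terminator the old hard-coded 64 could lose when a write filled the whole buffer. A sketch of the shared shape (copy_from_user as the assumed next step):
	static ssize_t debugfs_write_sketch(const char __user *buf, size_t nbytes)
	{
		char mybuf[64];

		if (nbytes > sizeof(mybuf) - 1)		/* reserve the final byte... */
			nbytes = sizeof(mybuf) - 1;	/* ...so parsing sees a NUL-terminated string */
		memset(mybuf, 0, sizeof(mybuf));
		if (copy_from_user(mybuf, buf, nbytes))
			return -EFAULT;
		return nbytes;
	}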
@@ -5156,7 +5156,7 @@ error_out:
static int
lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
{
- uint16_t ext_cnt, ext_size;
+ uint16_t ext_cnt = 0, ext_size = 0;
len += scnprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
"\nAvailable Extents Information:\n");
@@ -5484,7 +5484,7 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"Truncated . . .\n");
- break;
+ goto out;
}
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"%03x: %08x %08x %08x %08x "
@@ -5495,6 +5495,17 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
cnt += 32;
ptr += 8;
}
+ if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Truncated . . .\n");
+ goto out;
+ }
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Parameter Data\n");
+ ptr = (uint32_t *)&phba->cgn_p;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "%08x %08x %08x %08x\n",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3));
out:
return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
}
@@ -5520,7 +5531,7 @@ lpfc_rx_monitor_open(struct inode *inode, struct file *file)
if (!debug)
goto out;
- debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
+ debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE);
if (!debug->buffer) {
kfree(debug);
goto out;
@@ -5541,55 +5552,18 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
struct lpfc_rx_monitor_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
char *buffer = debug->buffer;
- struct rxtable_entry *entry;
- int i, len = 0, head, tail, last, start;
-
- head = atomic_read(&phba->rxtable_idx_head);
- while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
- /* Table is getting updated */
- msleep(20);
- head = atomic_read(&phba->rxtable_idx_head);
- }
- tail = atomic_xchg(&phba->rxtable_idx_tail, head);
- if (!phba->rxtable || head == tail) {
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "Rxtable is empty\n");
- goto out;
- }
- last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY;
- start = tail;
-
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- " MaxBPI\t Total Data Cmd Total Data Cmpl "
- " Latency(us) Avg IO Size\tMax IO Size IO cnt "
- "Info BWutil(ms)\n");
-get_table:
- for (i = start; i < last; i++) {
- entry = &phba->rxtable[i];
- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "%3d:%12lld %12lld\t%12lld\t"
- "%8lldus\t%8lld\t%10lld "
- "%8d %2d %2d(%2d)\n",
- i, entry->max_bytes_per_interval,
- entry->total_bytes,
- entry->rcv_bytes,
- entry->avg_io_latency,
- entry->avg_io_size,
- entry->max_read_cnt,
- entry->io_cnt,
- entry->cmf_info,
- entry->timer_utilization,
- entry->timer_interval);
+ if (!phba->rx_monitor) {
+ scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE,
+ "Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer,
+ MAX_DEBUGFS_RX_INFO_SIZE,
+ LPFC_MAX_RXMONITOR_ENTRY);
}
- if (head != last) {
- start = 0;
- last = head;
- goto get_table;
- }
-out:
- return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
+ return simple_read_from_buffer(buf, nbytes, ppos, buffer,
+ strlen(buffer));
}
static int
@@ -6259,9 +6233,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_slow_ring_trc);
if (!phba->slow_ring_trc) {
- phba->slow_ring_trc = kmalloc(
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc),
+ phba->slow_ring_trc = kcalloc(
+ lpfc_debugfs_max_slow_ring_trc,
+ sizeof(struct lpfc_debugfs_trc),
GFP_KERNEL);
if (!phba->slow_ring_trc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
@@ -6270,9 +6244,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
atomic_set(&phba->slow_ring_trc_cnt, 0);
- memset(phba->slow_ring_trc, 0,
- (sizeof(struct lpfc_debugfs_trc) *
- lpfc_debugfs_max_slow_ring_trc));
}
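The allocation swap is behavior-preserving but safer; the equivalence, sketched with hypothetical names and NULL checks elided:
	/* before: zeroed by hand, and count * size can overflow silently */
	trc = kmalloc(sizeof(struct lpfc_debugfs_trc) * max_trc, GFP_KERNEL);
	memset(trc, 0, sizeof(struct lpfc_debugfs_trc) * max_trc);

	/* after: kcalloc() zeroes the region and fails on multiplication overflow */
	trc = kcalloc(max_trc, sizeof(struct lpfc_debugfs_trc), GFP_KERNEL);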
snprintf(name, sizeof(name), "nvmeio_trc");
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index a5bf71b34972..8d2e8d05bbc0 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -282,7 +282,7 @@ struct lpfc_idiag {
void *ptr_private;
};
-#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY)
+#define MAX_DEBUGFS_RX_INFO_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
struct lpfc_rx_monitor_debug {
char *i_private;
char *buffer;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 37a4b79010bf..f82615d87c4b 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -149,7 +149,6 @@ struct lpfc_nodelist {
uint32_t cmd_qdepth;
unsigned long last_change_time;
unsigned long *active_rrqs_xri_bitmap;
- struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
/* flags to keep ndlp alive until special conditions are met */
@@ -188,7 +187,6 @@ struct lpfc_node_rrq {
#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */
-#define NLP_FCP_PRLI_RJT 0x00002000 /* Rport does not support FCP PRLI. */
#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */
#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */
#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e83453bea2ae..863b2125fed6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -134,9 +134,9 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
/**
* lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
* @vport: pointer to a host virtual N_Port data structure.
- * @expectRsp: flag indicating whether response is expected.
- * @cmdSize: size of the ELS command.
- * @retry: number of retries to the command IOCB when it fails.
+ * @expect_rsp: flag indicating whether response is expected.
+ * @cmd_size: size of the ELS command.
+ * @retry: number of retries for the command when it fails.
* @ndlp: pointer to a node-list data structure.
* @did: destination identifier.
* @elscmd: the ELS command code.
@@ -152,7 +152,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* Buffer Descriptor Entries (BDEs), allocates buffers for both command
* payload and response payload (if expected). The reference count on the
* ndlp is incremented by 1 and the reference to the ndlp is put into
- * context1 of the IOCB data structure for this IOCB to hold the ndlp
+ * the ndlp field of the IOCB data structure for this IOCB to hold the ndlp
* reference for the command's callback function to access later.
*
* Return code
@@ -160,25 +160,23 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
* NULL - when els iocb data structure allocation/preparation failed
**/
struct lpfc_iocbq *
-lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
- uint16_t cmdSize, uint8_t retry,
- struct lpfc_nodelist *ndlp, uint32_t did,
- uint32_t elscmd)
+lpfc_prep_els_iocb(struct lpfc_vport *vport, u8 expect_rsp,
+ u16 cmd_size, u8 retry,
+ struct lpfc_nodelist *ndlp, u32 did,
+ u32 elscmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
- struct ulp_bde64 *bpl;
- IOCB_t *icmd;
-
+ struct lpfc_dmabuf *pcmd, *prsp, *pbuflist, *bmp;
+ struct ulp_bde64_le *bpl;
+ u32 timeout = 0;
if (!lpfc_is_link_up(phba))
return NULL;
/* Allocate buffer for command iocb */
elsiocb = lpfc_sli_get_iocbq(phba);
-
- if (elsiocb == NULL)
+ if (!elsiocb)
return NULL;
/*
@@ -186,35 +184,33 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
* in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
*/
if ((did == Fabric_DID) &&
- (phba->hba_flag & HBA_FIP_SUPPORT) &&
- ((elscmd == ELS_CMD_FLOGI) ||
- (elscmd == ELS_CMD_FDISC) ||
- (elscmd == ELS_CMD_LOGO)))
+ (phba->hba_flag & HBA_FIP_SUPPORT) &&
+ ((elscmd == ELS_CMD_FLOGI) ||
+ (elscmd == ELS_CMD_FDISC) ||
+ (elscmd == ELS_CMD_LOGO)))
switch (elscmd) {
case ELS_CMD_FLOGI:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
case ELS_CMD_FDISC:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
case ELS_CMD_LOGO:
- elsiocb->iocb_flag |=
- ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
- & LPFC_FIP_ELS_ID_MASK);
- break;
+ elsiocb->cmd_flag |=
+ ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+ & LPFC_FIP_ELS_ID_MASK);
+ break;
}
else
- elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
-
- icmd = &elsiocb->iocb;
+ elsiocb->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
/* fill in BDEs for command */
/* Allocate buffer for command payload */
- pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
if (pcmd)
pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
if (!pcmd || !pcmd->virt)
@@ -223,19 +219,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
INIT_LIST_HEAD(&pcmd->list);
/* Allocate buffer for response payload */
- if (expectRsp) {
- prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ if (expect_rsp) {
+ prsp = kmalloc(sizeof(*prsp), GFP_KERNEL);
if (prsp)
prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&prsp->phys);
if (!prsp || !prsp->virt)
goto els_iocb_free_prsp_exit;
INIT_LIST_HEAD(&prsp->list);
- } else
+ } else {
prsp = NULL;
+ }
/* Allocate buffer for Buffer ptr list */
- pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+ pbuflist = kmalloc(sizeof(*pbuflist), GFP_KERNEL);
if (pbuflist)
pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
&pbuflist->phys);
@@ -244,90 +241,61 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
INIT_LIST_HEAD(&pbuflist->list);
- if (expectRsp) {
- icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
-
- icmd->un.elsreq64.remoteID = did; /* DID */
- icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
- if (elscmd == ELS_CMD_FLOGI)
- icmd->ulpTimeout = FF_DEF_RATOV * 2;
- else if (elscmd == ELS_CMD_LOGO)
- icmd->ulpTimeout = phba->fc_ratov;
- else
- icmd->ulpTimeout = phba->fc_ratov * 2;
- } else {
- icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
- icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
- icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
- icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
- icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
- icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
- }
- icmd->ulpBdeCount = 1;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
-
- /*
- * If we have NPIV enabled, we want to send ELS traffic by VPI.
- * For SLI4, since the driver controls VPIs we also want to include
- * all ELS pt2pt protocol traffic as well.
- */
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
- ((phba->sli_rev == LPFC_SLI_REV4) &&
- (vport->fc_flag & FC_PT2PT))) {
-
- if (expectRsp) {
- icmd->un.elsreq64.myID = vport->fc_myDID;
-
- /* For ELS_REQUEST64_CR, use the VPI by default */
- icmd->ulpContext = phba->vpi_ids[vport->vpi];
+ if (expect_rsp) {
+ switch (elscmd) {
+ case ELS_CMD_FLOGI:
+ timeout = FF_DEF_RATOV * 2;
+ break;
+ case ELS_CMD_LOGO:
+ timeout = phba->fc_ratov;
+ break;
+ default:
+ timeout = phba->fc_ratov * 2;
}
- icmd->ulpCt_h = 0;
- /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
- if (elscmd == ELS_CMD_ECHO)
- icmd->ulpCt_l = 0; /* context = invalid RPI */
- else
- icmd->ulpCt_l = 1; /* context = VPI */
+ /* Fill SGE for the num bde count */
+ elsiocb->num_bdes = 2;
}
- bpl = (struct ulp_bde64 *) pbuflist->virt;
- bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
- bpl->tus.f.bdeSize = cmdSize;
- bpl->tus.f.bdeFlags = 0;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bmp = pcmd;
+ else
+ bmp = pbuflist;
+
+ lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
+ elscmd, timeout, expect_rsp);
+
+ bpl = (struct ulp_bde64_le *)pbuflist->virt;
+ bpl->addr_low = cpu_to_le32(putPaddrLow(pcmd->phys));
+ bpl->addr_high = cpu_to_le32(putPaddrHigh(pcmd->phys));
+ bpl->type_size = cpu_to_le32(cmd_size);
+ bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
- if (expectRsp) {
+ if (expect_rsp) {
bpl++;
- bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
- bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
- bpl->tus.f.bdeSize = FCELSSIZE;
- bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
- bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl->addr_low = cpu_to_le32(putPaddrLow(prsp->phys));
+ bpl->addr_high = cpu_to_le32(putPaddrHigh(prsp->phys));
+ bpl->type_size = cpu_to_le32(FCELSSIZE);
+ bpl->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
}
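The resulting buffer pointer list holds at most two entries; schematically:
	/*
	 * bpl[0] -> pcmd->phys, cmd_size bytes   (ELS command payload)
	 * bpl[1] -> prsp->phys, FCELSSIZE bytes  (only when expect_rsp, which
	 *           is also why num_bdes was set to 2 for that case above)
	 */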
- elsiocb->context2 = pcmd;
- elsiocb->context3 = pbuflist;
+ elsiocb->cmd_dmabuf = pcmd;
+ elsiocb->bpl_dmabuf = pbuflist;
elsiocb->retry = retry;
elsiocb->vport = vport;
elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
- if (prsp) {
+ if (prsp)
list_add(&prsp->list, &pcmd->list);
- }
- if (expectRsp) {
+ if (expect_rsp) {
/* Xmit ELS command <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0116 Xmit ELS command x%x to remote "
"NPORT x%x I/O tag: x%x, port state:x%x "
- "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n",
+ "rpi x%x fc_flag:x%x\n",
elscmd, did, elsiocb->iotag,
vport->port_state, ndlp->nlp_rpi,
- vport->fc_flag, ndlp->nlp_flag, vport);
+ vport->fc_flag);
} else {
/* Xmit ELS response <elsCmd> to remote NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -335,13 +303,14 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
"NPORT x%x I/O tag: x%x, size: x%x "
"port_state x%x rpi x%x fc_flag x%x\n",
elscmd, ndlp->nlp_DID, elsiocb->iotag,
- cmdSize, vport->port_state,
+ cmd_size, vport->port_state,
ndlp->nlp_rpi, vport->fc_flag);
}
+
return elsiocb;
els_iocb_free_pbuf_exit:
- if (expectRsp)
+ if (expect_rsp)
lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
kfree(pbuflist);
@@ -376,7 +345,6 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct serv_parm *sp;
int rc;
@@ -426,7 +394,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp) {
err = 6;
- goto fail_no_ndlp;
+ goto fail_free_mbox;
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -442,13 +410,8 @@ fail_issue_reg_login:
* for the failed mbox command.
*/
lpfc_nlp_put(ndlp);
-fail_no_ndlp:
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
fail_free_mbox:
- mempool_free(mbox, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
fail:
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -496,45 +459,37 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
/* Supply CSPs only if we are fabric connect or pt-to-pt connect */
if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
- dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!dmabuf) {
- rc = -ENOMEM;
- goto fail;
- }
- dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
- if (!dmabuf->virt) {
+ rc = lpfc_mbox_rsrc_prep(phba, mboxq);
+ if (rc) {
rc = -ENOMEM;
- goto fail;
+ goto fail_mbox;
}
+ dmabuf = mboxq->ctx_buf;
memcpy(dmabuf->virt, &phba->fc_fabparam,
sizeof(struct serv_parm));
}
vport->port_state = LPFC_FABRIC_CFG_LINK;
- if (dmabuf)
+ if (dmabuf) {
lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
- else
+ /* lpfc_reg_vfi memsets the mailbox. Restore the ctx_buf. */
+ mboxq->ctx_buf = dmabuf;
+ } else {
lpfc_reg_vfi(mboxq, vport, 0);
+ }
mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
mboxq->vport = vport;
- mboxq->ctx_buf = dmabuf;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = -ENXIO;
- goto fail;
+ goto fail_mbox;
}
return 0;
+fail_mbox:
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
fail:
- if (mboxq)
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (dmabuf) {
- if (dmabuf->virt)
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
- }
-
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0289 Issue Register VFI failed: Err %d\n", rc);
@@ -650,7 +605,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
* @sp: pointer to service parameter data structure.
- * @irsp: pointer to the IOCB within the lpfc response IOCB.
+ * @ulp_word4: command response value
*
* This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
* function to handle the completion of a Fabric Login (FLOGI) into a fabric
@@ -667,7 +622,7 @@ lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
**/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
- struct serv_parm *sp, IOCB_t *irsp)
+ struct serv_parm *sp, uint32_t ulp_word4)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
@@ -692,7 +647,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
}
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = ulp_word4 & Mask_DID;
memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
ndlp->nlp_class_sup = 0;
@@ -903,10 +858,12 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (rc)
vport->fc_myDID = PT2PT_LocalID;
- /* Decrement ndlp reference count indicating that ndlp can be
- * safely released when other references to it are done.
+ /* If not registered with a transport, decrement ndlp reference
+ * count indicating that ndlp can be safely released when other
+ * references are removed.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
if (!ndlp) {
@@ -943,11 +900,12 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto fail;
}
} else {
- /* This side will wait for the PLOGI, decrement ndlp reference
- * count indicating that ndlp can be released when other
- * references to it are done.
+ /* This side will wait for the PLOGI. If not registered with
+ * a transport, decrement node reference count indicating that
+ * ndlp can be released when other references are removed.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
+ lpfc_nlp_put(ndlp);
/* Start discovery - this should just do CLEAR_LA */
lpfc_disc_start(vport);
@@ -987,28 +945,40 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp = cmdiocb->context1;
- struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ IOCB_t *irsp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
struct serv_parm *sp;
uint16_t fcf_index;
int rc;
+ u32 ulp_status, ulp_word4, tmo;
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
/* One additional decrement on node reference count to
* trigger the release of the node
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
+ lpfc_nlp_put(ndlp);
goto out;
}
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
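The get_job_* accessors introduced here hide whether a completion arrived as an SLI3 IOCB or an SLI4 WQE; a rough sketch of their shape (the real helpers live in the driver headers and may differ):
	static inline u32 get_ulpstatus_sketch(struct lpfc_hba *phba,
					       struct lpfc_iocbq *iocbq)
	{
		if (phba->sli_rev == LPFC_SLI_REV4)
			return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
		return iocbq->iocb.ulpStatus;	/* SLI3 path */
	}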
+
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"FLOGI cmpl: status:x%x/x%x state:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->port_state);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/*
* In case of FIP mode, perform roundrobin FCF failover
* due to new FCF discovery
@@ -1019,8 +989,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto stop_rr_fcf_flogi;
if ((phba->fcoe_cvl_eventtag_attn ==
phba->fcoe_cvl_eventtag) &&
- (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ (ulp_status == IOSTAT_LOCAL_REJECT) &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_SLI_ABORTED))
goto stop_rr_fcf_flogi;
else
@@ -1031,8 +1001,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"status:x%x/x%x, tmo:x%x, perform "
"roundrobin FCF failover\n",
phba->fcf.current_rec.fcf_indx,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
+ ulp_status, ulp_word4, tmo);
lpfc_sli4_set_fcf_flogi_fail(phba,
phba->fcf.current_rec.fcf_indx);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
@@ -1043,15 +1012,14 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
- if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2858 FLOGI failure Status:x%x/x%x TMO"
":x%x Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ ulp_status, ulp_word4, tmo,
+ phba->hba_flag, phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
@@ -1060,19 +1028,25 @@ stop_rr_fcf_flogi:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
"0150 FLOGI failure Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->sli4_xritag, irsp->ulpTimeout,
- kref_read(&ndlp->kref));
+ ulp_status, ulp_word4, cmdiocb->sli4_xritag,
+ tmo, kref_read(&ndlp->kref));
/* If this is not a loop open failure, bail out */
- if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_LOOP_OPEN_FAILURE)))
+ if (!(ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE))) {
+ /* FLOGI failure */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0100 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x\n",
+ ulp_status, ulp_word4, tmo);
goto flogifail;
+ }
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+ FC_PT2PT_NO_NVME);
spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
@@ -1098,7 +1072,7 @@ stop_rr_fcf_flogi:
}
/* Do not register VFI if the driver aborted FLOGI */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
@@ -1122,16 +1096,17 @@ stop_rr_fcf_flogi:
/* FLOGI completes successfully */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0101 FLOGI completes successfully, I/O tag:x%x, "
+ "0101 FLOGI completes successfully, I/O tag:x%x "
"xri x%x Data: x%x x%x x%x x%x x%x x%x x%x %d\n",
cmdiocb->iotag, cmdiocb->sli4_xritag,
- irsp->un.ulpWord[4], sp->cmn.e_d_tov,
+ ulp_word4, sp->cmn.e_d_tov,
sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
vport->port_state, vport->fc_flag,
sp->cmn.priority_tagging, kref_read(&ndlp->kref));
if (sp->cmn.priority_tagging)
- vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;
+ vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
+ LPFC_VMID_TYPE_PRIO);
if (vport->port_state == LPFC_FLOGI) {
/*
@@ -1139,7 +1114,8 @@ stop_rr_fcf_flogi:
* we are point to point, if Fport we are Fabric.
*/
if (sp->cmn.fPort)
- rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
+ rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp,
+ ulp_word4);
else if (!(phba->hba_flag & HBA_FCOE_MODE))
rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
else {
@@ -1206,16 +1182,16 @@ flogifail:
phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
spin_unlock_irq(&phba->hbalock);
- if (!lpfc_error_lost_link(irsp)) {
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* FLOGI failed, so just use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
- } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ } else if (((ulp_status != IOSTAT_LOCAL_REJECT) ||
+ (((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_SLI_ABORTED) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ ((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_SLI_DOWN))) &&
(phba->link_state != LPFC_CLEAR_LA)) {
/* If FLOGI failed enable link interrupt. */
@@ -1239,22 +1215,24 @@ static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
uint32_t *pcmd;
uint32_t cmd;
+ u32 ulp_status, ulp_word4;
- pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+ pcmd = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
cmd = *pcmd;
- irsp = &rspiocb->iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"6445 ELS completes after LINK_DOWN: "
" Status %x/%x cmd x%x flg x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
- cmdiocb->iocb_flag);
+ ulp_status, ulp_word4, cmd,
+ cmdiocb->cmd_flag);
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC) {
+ cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
lpfc_els_free_iocb(phba, cmdiocb);
@@ -1274,7 +1252,7 @@ lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* out FLOGI ELS command with one outstanding fabric IOCB at a time.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the FLOGI ELS command.
*
* Return code
@@ -1287,10 +1265,11 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
struct serv_parm *sp;
- IOCB_t *icmd;
+ union lpfc_wqe128 *wqe = NULL;
+ IOCB_t *icmd = NULL;
struct lpfc_iocbq *elsiocb;
struct lpfc_iocbq defer_flogi_acc;
- uint8_t *pcmd;
+ u8 *pcmd, ct;
uint16_t cmdsize;
uint32_t tmo, did;
int rc;
@@ -1302,8 +1281,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
+ wqe = &elsiocb->wqe;
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
icmd = &elsiocb->iocb;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For FLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
@@ -1336,12 +1316,15 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->sli_rev == LPFC_SLI_REV4) {
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_0) {
- elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
- elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
/* FLOGI needs to be 3 for WQE FCFI */
+ ct = SLI4_CT_FCFI;
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+
/* Set the fcfi to the fcfi we registered with */
- elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->fcf.fcfi);
}
+
/* Can't do SLI4 class2 without support sequence coalescing */
sp->cls2.classValid = 0;
sp->cls2.seqDelivery = 0;
@@ -1354,13 +1337,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
- } else
+ } else {
sp->cmn.request_multiple_Nport = 0;
- }
+ }
- if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
- icmd->un.elsreq64.myID = 0;
- icmd->un.elsreq64.fl = 1;
+ if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
+ }
}
tmo = phba->fc_ratov;
@@ -1369,14 +1353,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->fc_ratov = tmo;
phba->fc_stat.elsXmitFLOGI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_flogi;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue FLOGI: opt:x%x",
phba->sli3_options, 0, 0);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -1390,16 +1374,34 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
/* Check for a deferred FLOGI ACC condition */
if (phba->defer_flogi_acc_flag) {
+ /* lookup ndlp for received FLOGI */
+ ndlp = lpfc_findnode_did(vport, 0);
+ if (!ndlp)
+ return 0;
+
did = vport->fc_myDID;
vport->fc_myDID = Fabric_DID;
memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
- defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
- defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
- phba->defer_flogi_acc_ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(wqe_ctxt_tag,
+ &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+ phba->defer_flogi_acc_rx_id);
+ bf_set(wqe_rcvoxid,
+ &defer_flogi_acc.wqe.xmit_els_rsp.wqe_com,
+ phba->defer_flogi_acc_ox_id);
+ } else {
+ icmd = &defer_flogi_acc.iocb;
+ icmd->ulpContext = phba->defer_flogi_acc_rx_id;
+ icmd->unsli3.rcvsli3.ox_id =
+ phba->defer_flogi_acc_ox_id;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
@@ -1412,8 +1414,12 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp, NULL);
phba->defer_flogi_acc_flag = false;
-
vport->fc_myDID = did;
+
+ /* Decrement ndlp reference count to indicate the node can be
+ * released when other references are removed.
+ */
+ lpfc_nlp_put(ndlp);
}
return 0;
@@ -1439,7 +1445,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_nodelist *ndlp;
- IOCB_t *icmd;
+ u32 ulp_command;
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
@@ -1456,13 +1462,13 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
- ndlp = (struct lpfc_nodelist *)(iocb->context1);
+ ulp_command = get_job_cmnd(phba, iocb);
+ if (ulp_command == CMD_ELS_REQUEST64_CR) {
+ ndlp = iocb->ndlp;
if (ndlp && ndlp->nlp_DID == Fabric_DID) {
if ((phba->pport->fc_flag & FC_PT2PT) &&
!(phba->pport->fc_flag & FC_PT2PT_PLOGI))
- iocb->fabric_iocb_cmpl =
+ iocb->fabric_cmd_cmpl =
lpfc_ignore_els_cmpl;
lpfc_sli_issue_abort_iotag(phba, pring, iocb,
NULL);
@@ -1515,11 +1521,16 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
lpfc_enqueue_node(vport, ndlp);
}
+ /* Reset the Fabric flag, topology change may have happened */
+ vport->fc_flag &= ~FC_FABRIC;
if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
- /* This decrement of reference count to node shall kick off
- * the release of the node.
+ /* A node reference should be retained while registered with a
+ * transport or dev-loss-evt work is pending.
+ * Otherwise, decrement node reference to trigger release.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ lpfc_nlp_put(ndlp);
return 0;
}
return 1;
@@ -1562,10 +1573,13 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
}
if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
- /* decrement node reference count to trigger the release of
- * the node.
+ /* A node reference should be retained while registered with a
+ * transport or dev-loss-evt work is pending.
+ * Otherwise, decrement node reference to trigger release.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
+ !(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ lpfc_nlp_put(ndlp);
return 0;
}
return 1;
@@ -1776,18 +1790,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Move this back to NPR state */
if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
- /* The new_ndlp is replacing ndlp totally, so we need
- * to put ndlp on UNUSED list and try to free it.
+ /* The ndlp doesn't have a portname yet, but does have an
+ * NPort ID. The new_ndlp portname matches the Rport's
+ * portname. Reinstantiate the new_ndlp and reset the ndlp.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3179 PLOGI confirm NEW: %x %x\n",
new_ndlp->nlp_DID, keepDID);
/* Two ndlps cannot have the same did on the nodelist.
- * Note: for this case, ndlp has a NULL WWPN so setting
- * the nlp_fc4_type isn't required.
+ * The keepDID and keep_nlp_fc4_type need to be swapped
+ * because ndlp is inflight with no WWPN.
*/
ndlp->nlp_DID = keepDID;
+ ndlp->nlp_fc4_type = keep_nlp_fc4_type;
lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
@@ -1802,9 +1818,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
lpfc_unreg_rpi(vport, ndlp);
- /* Two ndlps cannot have the same did and the fc4
- * type must be transferred because the ndlp is in
- * flight.
+ /* The ndlp and new_ndlp both have WWPNs but are swapping
+ * NPort IDs and attributes.
*/
ndlp->nlp_DID = keepDID;
ndlp->nlp_fc4_type = keep_nlp_fc4_type;
@@ -1891,43 +1906,43 @@ lpfc_end_rscn(struct lpfc_vport *vport)
static void
lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
- struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_node_rrq *rrq;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
/* we pass cmdiocb to state machine which needs rspiocb as well */
rrq = cmdiocb->context_un.rrq;
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"RRQ cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4,
+ get_job_els_rsp64_did(phba, cmdiocb));
+
/* rrq completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2880 RRQ completes to DID x%x "
"Data: x%x x%x x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, rrq->xritag, rrq->rxid);
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ get_wqe_tmo(cmdiocb), rrq->xritag, rrq->rxid);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
/* RRQ failed. Don't print the vport to vport rjts */
- if (irsp->ulpStatus != IOSTAT_LS_RJT ||
- (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
- ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
- (phba)->pport->cfg_log_verbose & LOG_ELS)
+ if (ulp_status != IOSTAT_LS_RJT ||
+ (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+ ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2881 RRQ failure DID:%06X Status:"
"x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
}
lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
@@ -1966,24 +1981,33 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *prsp;
int disc;
struct serv_parm *sp = NULL;
+ u32 ulp_status, ulp_word4, did, iotag;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+ }
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PLOGI cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
- ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+ ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0136 PLOGI completes to NPort x%x "
"with no ndlp. Data: x%x x%x x%x\n",
- irsp->un.elsreq64.remoteID,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpIoTag);
+ did, ulp_status, ulp_word4, iotag);
goto out_freeiocb;
}
@@ -2000,7 +2024,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"0102 PLOGI completes to NPort x%06x "
"Data: x%x x%x x%x x%x x%x\n",
ndlp->nlp_DID, ndlp->nlp_fc4_type,
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
@@ -2011,7 +2035,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
@@ -2023,17 +2047,18 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
/* PLOGI failed. Don't print the vport to vport rjts */
- if (irsp->ulpStatus != IOSTAT_LS_RJT ||
- (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
- ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
- (phba)->pport->cfg_log_verbose & LOG_ELS)
+ if (ulp_status != IOSTAT_LS_RJT ||
+ (((ulp_word4) >> 16 != LSRJT_INVALID_CMD) &&
+ ((ulp_word4) >> 16 != LSRJT_UNABLE_TPC)) ||
+ (phba)->pport->cfg_log_verbose & LOG_ELS)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ "2753 PLOGI failure DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PLOGI);
@@ -2046,23 +2071,24 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&ndlp->lock);
goto out;
}
- spin_unlock_irq(&ndlp->lock);
/* No PLOGI collision and the node is not registered with the
* scsi or nvme transport. It is no longer an active node. Just
* start the device remove process.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
- spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- }
} else {
/* Good status, call state machine */
- prsp = list_entry(((struct lpfc_dmabuf *)
- cmdiocb->context2)->list.next,
+ prsp = list_entry(cmdiocb->cmd_dmabuf->list.next,
struct lpfc_dmabuf, list);
ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
@@ -2107,7 +2133,7 @@ out:
out_freeiocb:
/* Release the reference on the original I/O request. */
- free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ free_ndlp = cmdiocb->ndlp;
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
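[The PLOGI, PRLI, and ADISC completion paths in this patch all adopt the same deferral pattern: decide on node removal while holding ndlp->lock, then raise DEVICE_RM only after dropping the lock, because the discovery state machine may free the node. Condensed from the hunks above:]

bool release_node = false;

spin_lock_irq(&ndlp->lock);
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
        ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
        /* Don't race devloss; it owns the final cleanup. */
        if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
                release_node = true;
}
spin_unlock_irq(&ndlp->lock);

if (release_node)
        lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM);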
@@ -2127,7 +2153,7 @@ out_freeiocb:
* the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
*
* Note that the ndlp reference count will be incremented by 1 for holding
- * the ndlp and the reference to ndlp will be stored into the context1 field
+ * the ndlp and the reference to ndlp will be stored into the ndlp field
* of the IOCB for the completion callback function to the PLOGI ELS command.
*
* Return code
@@ -2174,11 +2200,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
if (!elsiocb)
return 1;
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
-
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For PLOGI request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
@@ -2225,13 +2247,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
}
phba->fc_stat.elsXmitPLOGI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_plogi;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x refcnt %d",
did, kref_read(&ndlp->kref), 0);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -2264,16 +2286,21 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
char *mode;
u32 loglevel;
+ u32 ulp_status;
+ u32 ulp_word4;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
+
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
- irsp = &(rspiocb->iocb);
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
@@ -2284,21 +2311,21 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"PRLI cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
"Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
+ ndlp->nlp_DID, ulp_status, ulp_word4,
vport->num_disc_nodes, ndlp->fc4_prli_sent);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
goto out;
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
@@ -2321,11 +2348,11 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
"data: x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4, ndlp->fc4_prli_sent);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (!lpfc_error_lost_link(irsp))
+ if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
@@ -2341,14 +2368,18 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* it is no longer an active node. Otherwise devloss
* handles the final cleanup.
*/
+ spin_lock_irq(&ndlp->lock);
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
!ndlp->fc4_prli_sent) {
- spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- }
} else {
/* Good status, call state machine. However, if another
* PRLI is outstanding, don't call the state machine
@@ -2378,7 +2409,7 @@ out:
* routine lpfc_sli_issue_iocb() to send out PRLI command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the PRLI ELS command.
*
* Return code
@@ -2452,7 +2483,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For PRLI request, remainder of payload is service parameters */
memset(pcmd, 0, cmdsize);
@@ -2484,7 +2515,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* For FCP support */
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
+ elsiocb->cmd_flag |= LPFC_PRLI_FCP_REQ;
/* Remove FCP type - processed. */
local_nlp_type &= ~NLP_FC4_FCP;
@@ -2518,41 +2549,40 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
- elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
+ elsiocb->cmd_flag |= LPFC_PRLI_NVME_REQ;
/* Remove NVME type - processed. */
local_nlp_type &= ~NLP_FC4_NVME;
}
phba->fc_stat.elsXmitPRLI++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_PRLI_SND;
-
- /* The vport counters are used for lpfc_scan_finished, but
- * the ndlp is used to track outstanding PRLIs for different
- * FC4 types.
- */
- vport->fc_prli_sent++;
- ndlp->fc4_prli_sent++;
- spin_unlock_irq(&ndlp->lock);
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_prli;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PRLI: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
- goto err;
+ return 1;
}
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
- goto err;
+ return 1;
}
+ /* The vport counters are used for lpfc_scan_finished, but
+ * the ndlp is used to track outstanding PRLIs for different
+ * FC4 types.
+ */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_PRLI_SND;
+ vport->fc_prli_sent++;
+ ndlp->fc4_prli_sent++;
+ spin_unlock_irq(&ndlp->lock);
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2562,12 +2592,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto send_next_prli;
else
return 0;
-
-err:
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_PRLI_SND;
- spin_unlock_irq(&ndlp->lock);
- return 1;
}
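[Note the reordering in lpfc_issue_els_prli(): NLP_PRLI_SND and the PRLI counters are now set only after lpfc_sli_issue_iocb() succeeds, which is what allows the old err: unwind label to be dropped. The resulting error flow, condensed from this hunk:]

rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
        lpfc_els_free_iocb(phba, elsiocb);
        lpfc_nlp_put(ndlp);
        return 1;               /* no flag or counter to unwind */
}

spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_PRLI_SND; /* state changes only on success */
vport->fc_prli_sent++;
ndlp->fc4_prli_sent++;
spin_unlock_irq(&ndlp->lock);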
/**
@@ -2719,16 +2743,27 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
int disc;
+ u32 ulp_status, ulp_word4, tmo;
+ bool release_node = false;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
- irsp = &(rspiocb->iocb);
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ADISC cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* Since ndlp can be freed in the disc state machine, note if this node
@@ -2742,8 +2777,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0104 ADISC completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, disc, vport->num_disc_nodes);
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ tmo, disc, vport->num_disc_nodes);
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
spin_lock_irq(&ndlp->lock);
@@ -2752,7 +2787,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
@@ -2767,23 +2802,26 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ADISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2755 ADISC failure DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
-
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
- NLP_EVT_CMPL_ADISC);
+ NLP_EVT_CMPL_ADISC);
/* As long as this node is not registered with the SCSI or NVMe
* transport, it is no longer an active node. Otherwise
* devloss handles the final cleanup.
*/
+ spin_lock_irq(&ndlp->lock);
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
- spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- spin_unlock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS))
+ release_node = true;
+ }
+ spin_unlock_irq(&ndlp->lock);
+
+ if (release_node)
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- }
} else
/* Good status, call state machine */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2810,7 +2848,7 @@ out:
* to issue the ADISC ELS command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the ADISC ELS command.
*
* Return code
@@ -2834,7 +2872,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For ADISC request, remainder of payload is service parameters */
*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
@@ -2848,12 +2886,12 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ap->DID = be32_to_cpu(vport->fc_myDID);
phba->fc_stat.elsXmitADISC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_ADISC_SND;
spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto err;
}
@@ -2861,6 +2899,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue ADISC: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@@ -2892,17 +2931,29 @@ static void
lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = ndlp->vport;
IOCB_t *irsp;
unsigned long flags;
uint32_t skip_recovery = 0;
int wake_up_waiter = 0;
+ u32 ulp_status;
+ u32 ulp_word4;
+ u32 tmo;
/* we pass cmdiocb to state machine which needs rspiocb as well */
- cmdiocb->context_un.rsp_iocb = rspiocb;
+ cmdiocb->rsp_iocb = rspiocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ }
- irsp = &(rspiocb->iocb);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
@@ -2913,7 +2964,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
ndlp->nlp_DID);
/* LOGO completes to NPort <nlp_DID> */
@@ -2921,8 +2972,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"0105 LOGO completes to NPort x%x "
"refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes);
+ ulp_status, ulp_word4,
+ tmo, vport->num_disc_nodes);
if (lpfc_els_chk_latt(vport)) {
skip_recovery = 1;
@@ -2934,14 +2985,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* all acceptable. Note the failure and move forward with
* discovery. The PLOGI will retry.
*/
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* LOGO failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
- /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
- if (lpfc_error_lost_link(irsp)) {
+ "2756 LOGO failure, No Retry DID:%06X "
+ "Status:x%x/x%x\n",
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4);
+
+ if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
skip_recovery = 1;
goto out;
}
@@ -2961,18 +3013,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
spin_unlock_irq(&ndlp->lock);
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
- lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
-
- /* Presume the node was released. */
- return;
+ goto out_rsrc_free;
}
out:
- /* Driver is done with the IO. */
- lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
-
/* At this point, the LOGO processing is complete. NOTE: For a
* pt2pt topology, we are assuming the NPortID will only change
* on link up processing. For a LOGO / PLOGI initiated by the
@@ -2996,9 +3040,13 @@ out:
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3187 LOGO completes to NPort x%x: Start "
"Recovery Data: x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
+ ndlp->nlp_DID, ulp_status,
+ ulp_word4, tmo,
vport->num_disc_nodes);
+
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
+
lpfc_disc_start(vport);
return;
}
@@ -3015,6 +3063,10 @@ out:
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_DEVICE_RM);
}
+out_rsrc_free:
+ /* Driver is done with the I/O. */
+ lpfc_els_free_iocb(phba, cmdiocb);
+ lpfc_nlp_put(ndlp);
}
/**
@@ -3029,7 +3081,7 @@ out:
* lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the LOGO ELS command.
*
* Callers of this routine are expected to unregister the RPI first
@@ -3061,7 +3113,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
@@ -3071,13 +3123,13 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
phba->fc_stat.elsXmitLOGO++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_logo;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto err;
}
@@ -3085,6 +3137,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue LOGO: did:x%x refcnt %d",
ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
+
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_els_free_iocb(phba, elsiocb);
@@ -3127,24 +3180,34 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_nodelist *free_ndlp;
IOCB_t *irsp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
- irsp = &rspiocb->iocb;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ELS cmd cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ iotag, ulp_status, ulp_word4, tmo);
/* Check to see if link went down during discovery */
lpfc_els_chk_latt(vport);
- free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ free_ndlp = cmdiocb->ndlp;
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);
@@ -3171,7 +3234,6 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ns_ndlp;
LPFC_MBOXQ_t *mbox;
- struct lpfc_dmabuf *mp;
if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
return rc;
@@ -3208,7 +3270,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
if (!mbox->ctx_ndlp) {
rc = -ENOMEM;
- goto out_mem;
+ goto out;
}
mbox->vport = vport;
@@ -3216,21 +3278,15 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
if (rc == MBX_NOT_FINISHED) {
rc = -ENODEV;
lpfc_nlp_put(fc_ndlp);
- goto out_mem;
+ goto out;
}
/* Success path. Exit. */
lpfc_nlp_set_state(vport, fc_ndlp,
NLP_STE_REG_LOGIN_ISSUE);
return 0;
- out_mem:
- fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
-
out:
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
"0938 %s: failed to format reg_login "
"Data: x%x x%x x%x x%x\n", __func__,
@@ -3260,23 +3316,32 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd, *prsp;
u32 *pdata;
u32 cmd;
- struct lpfc_nodelist *ndlp = cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
- irsp = &rspiocb->iocb;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"ELS cmd cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
+
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
- "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
- "x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout,
- cmdiocb->retry);
+ "0217 ELS cmd tag x%x completes Data: x%x x%x x%x x%x\n",
+ iotag, ulp_status, ulp_word4, tmo, cmdiocb->retry);
- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
if (!pcmd)
goto out;
@@ -3286,8 +3351,8 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
cmd = *pdata;
/* Only 1 retry for ELS Timeout only */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
IOERR_SEQUENCE_TIMEOUT)) {
cmdiocb->retry++;
if (cmdiocb->retry <= 1) {
@@ -3299,7 +3364,6 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_issue_els_edc(vport, cmdiocb->retry);
break;
case ELS_CMD_RDF:
- cmdiocb->context1 = NULL; /* save ndlp refcnt */
lpfc_issue_els_rdf(vport, cmdiocb->retry);
break;
}
@@ -3312,11 +3376,11 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb);
return;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
/* ELS discovery cmd completes with error */
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT,
"4203 ELS cmd x%x error: x%x x%X\n", cmd,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
@@ -3341,7 +3405,7 @@ lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"4677 Fabric RDF Notification Grant "
"Data: 0x%08x Reg: %x %x\n",
be32_to_cpu(
- prdf->reg_d1.desc_tags[i]),
+ prdf->reg_d1.desc_tags[i]),
phba->cgn_reg_signal,
phba->cgn_reg_fpin);
}
@@ -3367,7 +3431,7 @@ out:
* routine is invoked to send the SCR IOCB.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the SCR ELS command.
*
* Return code
@@ -3409,7 +3473,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
}
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
pcmd += sizeof(uint32_t);
@@ -3423,9 +3487,9 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitSCR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -3456,7 +3520,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
* replay the RSCN to registered recipients.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RSCN ELS command.
*
* Return code
@@ -3506,7 +3570,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
if (!elsiocb)
return 1;
- event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+ event = elsiocb->cmd_dmabuf->virt;
event->rscn.rscn_cmd = ELS_RSCN;
event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
@@ -3520,9 +3584,9 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
event->portid.rscn_fid[2] = nportid & 0x000000FF;
phba->fc_stat.elsXmitRSCN++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -3538,11 +3602,6 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of node.
- */
- if (!(vport->fc_flag & FC_PT2PT))
- lpfc_nlp_put(ndlp);
return 0;
}
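[The lpfc_nlp_put() deleted above would otherwise have paired with the lpfc_nlp_get() taken when the iocb was armed; after this patch the reference lifecycle is uniformly one get at issue time and one put in the completion path. Schematically, condensed from this file:]

/* issue path */
elsiocb->ndlp = lpfc_nlp_get(ndlp);     /* +1, held by the iocb */

/* completion path (lpfc_cmpl_els_cmd and friends) */
free_ndlp = cmdiocb->ndlp;
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(free_ndlp);                /* -1, balances the get */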
@@ -3560,7 +3619,7 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
* lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the FARPR ELS command.
*
* Return code
@@ -3591,11 +3650,11 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
}
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_RNID);
+ ndlp->nlp_DID, ELS_CMD_FARPR);
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
pcmd += sizeof(uint32_t);
@@ -3624,9 +3683,9 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
ndlp->nlp_DID, 0, 0);
phba->fc_stat.elsXmitFARPR++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -3657,7 +3716,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
* for diagnostic functions.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RDF ELS command.
*
* Return code
@@ -3694,8 +3753,7 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
return -ENOMEM;
/* Configure the payload for the supported FPIN events. */
- prdf = (struct lpfc_els_rdf_req *)
- (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ prdf = (struct lpfc_els_rdf_req *)elsiocb->cmd_dmabuf->virt;
memset(prdf, 0, cmdsize);
prdf->rdf.fpin_cmd = ELS_RDF;
prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
@@ -3715,9 +3773,9 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
phba->cgn_reg_fpin);
phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return -EIO;
}
@@ -3788,9 +3846,6 @@ lpfc_least_capable_settings(struct lpfc_hba *phba,
{
u32 rsp_sig_cap = 0, drv_sig_cap = 0;
u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
- struct lpfc_cgn_info *cp;
- u32 crc;
- u16 sig_freq;
/* Get rsp signal and frequency capabilities. */
rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
@@ -3846,25 +3901,7 @@ lpfc_least_capable_settings(struct lpfc_hba *phba,
}
}
- if (!phba->cgn_i)
- return;
-
- /* Update signal frequency in congestion info buffer */
- cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
-
- /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
- * are received by the HBA
- */
- sig_freq = phba->cgn_sig_freq;
-
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
- cp->cgn_warn_freq = cpu_to_le16(sig_freq);
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- cp->cgn_alarm_freq = cpu_to_le16(sig_freq);
- cp->cgn_warn_freq = cpu_to_le16(sig_freq);
- }
- crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
- cp->cgn_info_crc = cpu_to_le32(crc);
+ /* We are NOT recording signal frequency in congestion info buffer */
return;
out_no_support:
@@ -3893,7 +3930,7 @@ static void
lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp;
+ IOCB_t *irsp_iocb;
struct fc_els_edc_resp *edc_rsp;
struct fc_tlv_desc *tlv;
struct fc_diag_cg_sig_desc *pcgd;
@@ -3904,22 +3941,33 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
int desc_cnt = 0, bytes_remain;
bool rcv_cap_desc = false;
struct lpfc_nodelist *ndlp;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
+
+ ndlp = cmdiocb->ndlp;
- irsp = &rspiocb->iocb;
- ndlp = cmdiocb->context1;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(rspiocb);
+ iotag = get_wqe_reqtag(rspiocb);
+ } else {
+ irsp_iocb = &rspiocb->iocb;
+ tmo = irsp_iocb->ulpTimeout;
+ iotag = irsp_iocb->ulpIoTag;
+ }
lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD,
"EDC cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ iotag, ulp_status, ulp_word4, tmo);
- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
if (!pcmd)
goto out;
@@ -3928,7 +3976,7 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* Need to clear signal values, send features MB and RDF with FPIN. */
- if (irsp->ulpStatus)
+ if (ulp_status)
goto out;
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
@@ -3940,7 +3988,8 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
/* ELS cmd tag <ulpIoTag> completes */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"4676 Fabric EDC Rsp: "
"0x%02x, 0x%08x\n",
edc_rsp->acc_hdr.la_cmd,
@@ -3977,18 +4026,18 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
sizeof(struct fc_diag_lnkflt_desc)) {
- lpfc_printf_log(
- phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6462 Truncated Link Fault Diagnostic "
"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
desc_cnt, bytes_remain,
FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
- sizeof(struct fc_diag_cg_sig_desc));
+ sizeof(struct fc_diag_lnkflt_desc));
goto out;
}
plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
- lpfc_printf_log(
- phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
"4617 Link Fault Desc Data: 0x%08x 0x%08x "
"0x%08x 0x%08x 0x%08x\n",
be32_to_cpu(plnkflt->desc_tag),
@@ -4068,8 +4117,26 @@ out:
}
static void
-lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
+lpfc_format_edc_lft_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
{
+ struct fc_diag_lnkflt_desc *lft = (struct fc_diag_lnkflt_desc *)tlv;
+
+ lft->desc_tag = cpu_to_be32(ELS_DTAG_LNK_FAULT_CAP);
+ lft->desc_len = cpu_to_be32(
+ FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_lnkflt_desc));
+
+ lft->degrade_activate_threshold =
+ cpu_to_be32(phba->degrade_activate_threshold);
+ lft->degrade_deactivate_threshold =
+ cpu_to_be32(phba->degrade_deactivate_threshold);
+ lft->fec_degrade_interval = cpu_to_be32(phba->fec_degrade_interval);
+}
+
+static void
+lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
+{
+ struct fc_diag_cg_sig_desc *cgd = (struct fc_diag_cg_sig_desc *)tlv;
+
/* We are assuming cgd was zeroed before calling this routine */
/* Configure the congestion detection capability */
@@ -4113,6 +4180,23 @@ lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd)
cpu_to_be16(EDC_CG_SIGFREQ_MSEC);
}
+static bool
+lpfc_link_is_lds_capable(struct lpfc_hba *phba)
+{
+ if (!(phba->lmt & LMT_64Gb))
+ return false;
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ return false;
+
+ if (phba->sli4_hba.conf_trunk) {
+ if (phba->trunk_link.phy_lnk_speed == LPFC_USER_LINK_SPEED_64G)
+ return true;
+ } else if (phba->fc_linkspeed == LPFC_LINK_SPEED_64GHZ) {
+ return true;
+ }
+ return false;
+}
+
/**
* lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric.
* @vport: pointer to a host virtual N_Port data structure.
@@ -4140,12 +4224,12 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
- struct lpfc_els_edc_req *edc_req;
- struct fc_diag_cg_sig_desc *cgn_desc;
+ struct fc_els_edc *edc_req;
+ struct fc_tlv_desc *tlv;
u16 cmdsize;
struct lpfc_nodelist *ndlp;
u8 *pcmd = NULL;
- u32 edc_req_size, cgn_desc_size;
+ u32 cgn_desc_size, lft_desc_size;
int rc;
if (vport->port_type == LPFC_NPIV_PORT)
@@ -4155,40 +4239,48 @@ lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
return -ENODEV;
- /* If HBA doesn't support signals, drop into RDF */
- if (!phba->cgn_init_reg_signal)
+ cgn_desc_size = (phba->cgn_init_reg_signal) ?
+ sizeof(struct fc_diag_cg_sig_desc) : 0;
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize = cgn_desc_size + lft_desc_size;
+
+ /* Skip EDC if no applicable descriptors */
+ if (!cmdsize)
goto try_rdf;
- edc_req_size = sizeof(struct fc_els_edc);
- cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
- cmdsize = edc_req_size + cgn_desc_size;
+ cmdsize += sizeof(struct fc_els_edc);
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, ELS_CMD_EDC);
if (!elsiocb)
goto try_rdf;
/* Configure the payload for the supported Diagnostics capabilities. */
- pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
- edc_req = (struct lpfc_els_edc_req *)pcmd;
- edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
- edc_req->edc.edc_cmd = ELS_EDC;
-
- cgn_desc = &edc_req->cgn_desc;
+ edc_req = (struct fc_els_edc *)pcmd;
+ edc_req->desc_len = cpu_to_be32(cgn_desc_size + lft_desc_size);
+ edc_req->edc_cmd = ELS_EDC;
+ tlv = edc_req->desc;
- lpfc_format_edc_cgn_desc(phba, cgn_desc);
+ if (cgn_desc_size) {
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ tlv = fc_tlv_next_desc(tlv);
+ }
- phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
"4623 Xmit EDC to remote "
"NPORT x%x reg_sig x%x reg_fpin:x%x\n",
ndlp->nlp_DID, phba->cgn_reg_signal,
phba->cgn_reg_fpin);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_disc_cmd;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return -EIO;
}
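[The optional-descriptor sizing above steps between TLVs with fc_tlv_next_desc(). That helper comes from the generic FC ELS headers; its assumed shape (a descriptor's length field excludes the tag/length header, hence the *_SZ_FROM_LENGTH conversion):]

static inline void *fc_tlv_next_desc(void *desc)
{
        struct fc_tlv_desc *tlv = desc;

        /* Advance past the TLV header plus its payload. */
        return desc + FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
}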
@@ -4465,9 +4557,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ union lpfc_wqe128 *irsp = &rspiocb->wqe;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *elscmd;
struct ls_rjt stat;
int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
@@ -4475,9 +4567,11 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
uint32_t cmd = 0;
uint32_t did;
int link_reset = 0, rc;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- /* Note: context2 may be 0 for internal driver abort
+ /* Note: cmd_dmabuf may be 0 for internal driver abort
* of a delayed ELS command.
*/
@@ -4490,7 +4584,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
did = ndlp->nlp_DID;
else {
/* We should only hit this case for retrying PLOGI */
- did = irsp->un.elsreq64.remoteID;
+ did = get_job_els_rsp64_did(phba, rspiocb);
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp && (cmd != ELS_CMD_PLOGI))
return 0;
@@ -4498,9 +4592,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Retry ELS: wd7:x%x wd4:x%x did:x%x",
- *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did);
+ *(((uint32_t *)irsp) + 7), ulp_word4, did);
- switch (irsp->ulpStatus) {
+ switch (ulp_status) {
case IOSTAT_FCP_RSP_ERROR:
break;
case IOSTAT_REMOTE_STOP:
@@ -4514,17 +4608,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
break;
case IOSTAT_LOCAL_REJECT:
- switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
+ switch ((ulp_word4 & IOERR_PARAM_MASK)) {
case IOERR_LOOP_OPEN_FAILURE:
- if (cmd == ELS_CMD_FLOGI) {
- if (PCI_DEVICE_ID_HORNET ==
- phba->pcidev->device) {
- phba->fc_topology = LPFC_TOPOLOGY_LOOP;
- phba->pport->fc_myDID = 0;
- phba->alpa_map[0] = 0;
- phba->alpa_map[1] = 0;
- }
- }
if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
delay = 1000;
retry = 1;
@@ -4595,7 +4680,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
- if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ if (ulp_word4 & RJT_UNAVAIL_TEMP) {
retry = 1;
break;
}
@@ -4608,53 +4693,75 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOSTAT_LS_RJT:
- stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
+ stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
/* Added for vendor-specific support
* Just keep retrying for these Rsn / Exp codes
*/
+ if ((vport->fc_flag & FC_PT2PT) &&
+ cmd == ELS_CMD_NVMEPRLI) {
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ case LSRJT_INVALID_CMD:
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_CMD_UNSUPPORTED:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0168 NVME PRLI LS_RJT "
+ "reason %x port doesn't "
+ "support NVME, disabling NVME\n",
+ stat.un.b.lsRjtRsnCode);
+ retry = 0;
+ vport->fc_flag |= FC_PT2PT_NO_NVME;
+ goto out_retry;
+ }
+ }
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
- /* The driver has a VALID PLOGI but the rport has
- * rejected the PRLI - can't do it now. Delay
- * for 1 second and try again.
- *
- * However, if explanation is REQ_UNSUPPORTED there's
- * no point to retry PRLI.
+ /* Special case for PRLI LS_RJTs. Recall that lpfc
+ * uses a single routine to issue both PRLI FC4 types.
+ * If the PRLI is rejected because that FC4 type
+ * isn't really supported, don't retry and cause
+ * multiple transport registrations. Otherwise, parse
+ * the reason code/reason code explanation and take the
+ * appropriate action.
*/
- if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
- stat.un.b.lsRjtRsnCodeExp !=
- LSEXP_REQ_UNSUPPORTED) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
- break;
- }
-
- /* Legacy bug fix code for targets with PLOGI delays. */
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CMD_IN_PROGRESS) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_ELS | LOG_NODE,
+ "0153 ELS cmd x%x LS_RJT by x%x. "
+ "RsnCode x%x RsnCodeExp x%x\n",
+ cmd, did, stat.un.b.lsRjtRsnCode,
+ stat.un.b.lsRjtRsnCodeExp);
+
+ switch (stat.un.b.lsRjtRsnCodeExp) {
+ case LSEXP_CANT_GIVE_DATA:
+ case LSEXP_CMD_IN_PROGRESS:
if (cmd == ELS_CMD_PLOGI) {
delay = 1000;
maxretry = 48;
}
retry = 1;
break;
- }
- if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_CANT_GIVE_DATA) {
- if (cmd == ELS_CMD_PLOGI) {
+ case LSEXP_REQ_UNSUPPORTED:
+ case LSEXP_NO_RSRC_ASSIGN:
+ /* These explanation codes get no retry. */
+ if (cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI)
+ break;
+ fallthrough;
+ default:
+ /* Limit the delay and retry action to a limited
+ * cmd set. There are other ELS commands where
+ * a retry is not expected.
+ */
+ if (cmd == ELS_CMD_PLOGI ||
+ cmd == ELS_CMD_PRLI ||
+ cmd == ELS_CMD_NVMEPRLI) {
delay = 1000;
- maxretry = 48;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
}
- retry = 1;
- break;
- }
- if (cmd == ELS_CMD_PLOGI) {
- delay = 1000;
- maxretry = lpfc_max_els_tries + 1;
- retry = 1;
break;
}
+
if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
(cmd == ELS_CMD_FDISC) &&
(stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
@@ -4734,12 +4841,9 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* on this rport.
*/
if (stat.un.b.lsRjtRsnCodeExp ==
- LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
- spin_unlock_irq(&ndlp->lock);
- retry = 0;
- goto out_retry;
+ LSEXP_REQ_UNSUPPORTED) {
+ if (cmd == ELS_CMD_PRLI)
+ goto out_retry;
}
break;
}
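[For readers tracing the IOSTAT_LS_RJT branch: ulp_word4 carries the raw LS_RJT payload word, which the driver now unpacks through the ls_rjt union instead of open-coded shifts (the old code compared ulp_word4 >> 16 against reason codes directly). A compact restatement; the bit positions are inferred from that shift, not stated by this patch:]

struct ls_rjt stat;

stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
/* stat.un.b.lsRjtRsnCode    - reason code (old code: ulp_word4 >> 16) */
/* stat.un.b.lsRjtRsnCodeExp - reason explanation, the byte below it  */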
@@ -4771,7 +4875,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if ((cmd == ELS_CMD_FLOGI) &&
(phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
- !lpfc_error_lost_link(irsp)) {
+ !lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* FLOGI retry policy */
retry = 1;
/* retry FLOGI forever */
@@ -4784,7 +4888,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
delay = 5000;
else if (cmdiocb->retry >= 32)
delay = 1000;
- } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+ } else if ((cmd == ELS_CMD_FDISC) &&
+ !lpfc_error_lost_link(ulp_status, ulp_word4)) {
/* retry FDISCs every second up to devloss */
retry = 1;
maxretry = vport->cfg_devloss_tmo;
@@ -4821,8 +4926,8 @@ out_retry:
cmd, did, cmdiocb->retry, delay);
if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
- ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
+ ((ulp_status != IOSTAT_LOCAL_REJECT) ||
+ ((ulp_word4 & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES))) {
/* Don't reset timer for no resources */
@@ -4894,15 +4999,15 @@ out_retry:
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0137 No retry ELS command x%x to remote "
"NPORT x%x: Out of Resources: Error:x%x/%x\n",
- cmd, did, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ cmd, did, ulp_status,
+ ulp_word4);
}
else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0108 No retry ELS command x%x to remote "
"NPORT x%x Retried:%d Error:x%x/%x\n",
- cmd, did, cmdiocb->retry, irsp->ulpStatus,
- irsp->un.ulpWord[4]);
+ cmd, did, cmdiocb->retry, ulp_status,
+ ulp_word4);
}
return 0;
}
@@ -4968,10 +5073,10 @@ lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
* command IOCB data structure contains the reference to various associated
* resources, these fields must be set to NULL if the associated reference
* not present:
- * context1 - reference to ndlp
- * context2 - reference to cmd
- * context2->next - reference to rsp
- * context3 - reference to bpl
+ * cmd_dmabuf - reference to cmd
+ * cmd_dmabuf->next - reference to rsp
+ * rsp_dmabuf - unused
+ * bpl_dmabuf - reference to bpl
*
* It first properly decrements the reference count held on ndlp for the
* IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
@@ -4991,19 +5096,19 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
- /* The I/O iocb is complete. Clear the context1 data. */
- elsiocb->context1 = NULL;
+ /* The I/O iocb is complete. Clear the node and first dmbuf */
+ elsiocb->ndlp = NULL;
- /* context2 = cmd, context2->next = rsp, context3 = bpl */
- if (elsiocb->context2) {
- if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
+ /* cmd_dmabuf = cmd, cmd_dmabuf->next = rsp, bpl_dmabuf = bpl */
+ if (elsiocb->cmd_dmabuf) {
+ if (elsiocb->cmd_flag & LPFC_DELAY_MEM_FREE) {
/* Firmware could still be in progress of DMAing
* payload, so don't free data buffer till after
* a hbeat.
*/
- elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
- buf_ptr = elsiocb->context2;
- elsiocb->context2 = NULL;
+ elsiocb->cmd_flag &= ~LPFC_DELAY_MEM_FREE;
+ buf_ptr = elsiocb->cmd_dmabuf;
+ elsiocb->cmd_dmabuf = NULL;
if (buf_ptr) {
buf_ptr1 = NULL;
spin_lock_irq(&phba->hbalock);
@@ -5022,16 +5127,16 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
spin_unlock_irq(&phba->hbalock);
}
} else {
- buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+ buf_ptr1 = elsiocb->cmd_dmabuf;
lpfc_els_free_data(phba, buf_ptr1);
- elsiocb->context2 = NULL;
+ elsiocb->cmd_dmabuf = NULL;
}
}
- if (elsiocb->context3) {
- buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+ if (elsiocb->bpl_dmabuf) {
+ buf_ptr = elsiocb->bpl_dmabuf;
lpfc_els_free_bpl(phba, buf_ptr);
- elsiocb->context3 = NULL;
+ elsiocb->bpl_dmabuf = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
@@ -5047,7 +5152,7 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
* Accept (ACC) Response ELS command. This routine is invoked to indicate
* the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
* release the ndlp if it has the last reference remaining (reference count
- * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
+ * is 1). If it succeeds (meaning the ndlp was released), it sets the iocb ndlp
* field to NULL to inform the following lpfc_els_free_iocb() routine no
* ndlp reference count needs to be decremented. Otherwise, the ndlp
* reference use-count shall be decremented by the lpfc_els_free_iocb()
@@ -5058,14 +5163,16 @@ static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = cmdiocb->vport;
- IOCB_t *irsp;
+ u32 ulp_status, ulp_word4;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"ACC LOGO cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
+ ulp_status, ulp_word4, ndlp->nlp_DID);
/* ACC to LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0109 ACC to LOGO completes to NPort x%x refcnt %d "
@@ -5083,7 +5190,6 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
-
/* If PLOGI is being retried, PLOGI completion will cleanup the
* node. The NLP_NPR_2B_DISC flag needs to be retained to make
* progress on nodes discovered from last RSCN.
@@ -5103,7 +5209,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Indicate the node has already released, should
* not reference to it from within lpfc_els_free_iocb.
*/
- cmdiocb->context1 = NULL;
+ cmdiocb->ndlp = NULL;
}
}
out:
@@ -5131,14 +5237,10 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
u32 mbx_flag = pmb->mbox_flag;
u32 mbx_cmd = pmb->u.mb.mbxCommand;
- pmb->ctx_buf = NULL;
- pmb->ctx_ndlp = NULL;
-
if (ndlp) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0006 rpi x%x DID:%x flg:%x %d x%px "
@@ -5161,10 +5263,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_drop_node(ndlp->vport, ndlp);
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
@@ -5184,14 +5283,12 @@ static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
IOCB_t *irsp;
LPFC_MBOXQ_t *mbox = NULL;
- struct lpfc_dmabuf *mp = NULL;
-
- irsp = &rspiocb->iocb;
+ u32 ulp_status, ulp_word4, tmo, did, iotag;
if (!vport) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -5201,37 +5298,44 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ tmo = get_wqe_tmo(cmdiocb);
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ tmo = irsp->ulpTimeout;
+ iotag = irsp->ulpIoTag;
+ }
+
/* Check to see if link went down during discovery */
if (!ndlp || lpfc_els_chk_latt(vport)) {
- if (mbox) {
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(mbox, phba->mbox_mem_pool);
- }
+ if (mbox)
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
goto out;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"ELS rsp cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- cmdiocb->iocb.un.elsreq64.remoteID);
+ ulp_status, ulp_word4, did);
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0110 ELS response tag x%x completes "
- "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n",
- cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
- rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
+ "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n",
+ iotag, ulp_status, ulp_word4, tmo,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox);
+ ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
if (mbox) {
- if ((rspiocb->iocb.ulpStatus == 0) &&
- (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
+ if (ulp_status == 0 &&
+ (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
(!(vport->fc_flag & FC_PT2PT))) {
- if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
+ ndlp->nlp_state ==
+ NLP_STE_REG_LOGIN_ISSUE) {
lpfc_printf_vlog(vport, KERN_INFO,
LOG_DISCOVERY,
"0314 PLOGI recov "
@@ -5241,14 +5345,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_state,
ndlp->nlp_rpi,
ndlp->nlp_flag);
- mp = mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt,
- mp->phys);
- kfree(mp);
- }
- mempool_free(mbox, phba->mbox_mem_pool);
- goto out;
+ goto out_free_mbox;
}
}
@@ -5257,7 +5354,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp)
- goto out;
+ goto out_free_mbox;
mbox->vport = vport;
if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
@@ -5289,12 +5386,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
}
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(mbox, phba->mbox_mem_pool);
+out_free_mbox:
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
}
out:
if (ndlp && shost) {
@@ -5312,12 +5405,15 @@ out:
(vport && vport->port_type == LPFC_NPIV_PORT) &&
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD) &&
ndlp->nlp_flag & NLP_RELEASE_RPI) {
- lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
- ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
- spin_unlock_irq(&ndlp->lock);
- lpfc_drop_node(vport, ndlp);
+ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
+ ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_drop_node(vport, ndlp);
+ }
}
/* Release the originating I/O reference. */
@@ -5343,7 +5439,7 @@ out:
* mailbox command to the HBA later when callback is invoked.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the corresponding
* response ELS IOCB command.
*
@@ -5359,6 +5455,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
+ union lpfc_wqe128 *oldwqe = &oldiocb->wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
struct serv_parm *sp;
@@ -5367,8 +5465,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
ELS_PKT *els_pkt_ptr;
struct fc_els_rdf_resp *rdf_resp;
- oldcmd = &oldiocb->iocb;
-
switch (flag) {
case ELS_CMD_ACC:
cmdsize = sizeof(uint32_t);
@@ -5381,10 +5477,26 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 1;
}
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
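
[Note: the SLI-4 branch copies the XRI/rx_id and ox_id between WQEs with the bf_set()/bf_get() accessor pairs, which read and write named bit-fields of a 32-bit WQE word by shift and mask. The real macros take a field name plus a structure pointer and expand <name>_SHIFT/<name>_MASK constants; the field layout below is made up purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define CTXT_SHIFT 0
    #define CTXT_MASK  0xffffu
    #define OXID_SHIFT 16
    #define OXID_MASK  0xffffu

    #define bf_set(word, shift, mask, val) \
            ((word) = ((word) & ~((mask) << (shift))) | \
                      (((uint32_t)(val) & (mask)) << (shift)))
    #define bf_get(word, shift, mask) (((word) >> (shift)) & (mask))

    int main(void)
    {
            uint32_t wqe_word = 0;

            /* Copy the received frame's rx_id and ox_id into the response. */
            bf_set(wqe_word, CTXT_SHIFT, CTXT_MASK, 0x123);
            bf_set(wqe_word, OXID_SHIFT, OXID_MASK, 0xabc);
            printf("ctxt=0x%x oxid=0x%x\n",
                   (unsigned)bf_get(wqe_word, CTXT_SHIFT, CTXT_MASK),
                   (unsigned)bf_get(wqe_word, OXID_SHIFT, OXID_MASK));
            return 0;
    }
]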
@@ -5400,10 +5512,26 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
if (mbox)
elsiocb->context_un.mbox = mbox;
@@ -5462,12 +5590,28 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (u8 *) elsiocb->cmd_dmabuf->virt;
- memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
+ memcpy(pcmd, oldiocb->cmd_dmabuf->virt,
sizeof(uint32_t) + sizeof(PRLO));
*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
els_pkt_ptr = (ELS_PKT *) pcmd;
@@ -5484,10 +5628,26 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_ctxt_tag,
+ &oldwqe->xmit_els_rsp.wqe_com));
+
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ bf_get(wqe_rcvoxid,
+ &oldwqe->xmit_els_rsp.wqe_com));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
rdf_resp = (struct fc_els_rdf_resp *)pcmd;
memset(rdf_resp, 0, sizeof(*rdf_resp));
rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
@@ -5509,14 +5669,14 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
ndlp->nlp_flag &= ~NLP_LOGO_ACC;
spin_unlock_irq(&ndlp->lock);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc;
} else {
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
}
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -5553,7 +5713,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
* to issue to the HBA later.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the reject response
* ELS IOCB command.
*
@@ -5570,6 +5730,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
@@ -5580,11 +5741,20 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
pcmd += sizeof(uint32_t);
@@ -5599,16 +5769,16 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
rejectError, elsiocb->iotag,
- elsiocb->iocb.ulpContext, ndlp->nlp_DID,
+ get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue LS_RJT: did:x%x flg:x%x err:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -5654,44 +5824,65 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_els_edc_rsp *edc_rsp;
+ struct fc_els_edc_resp *edc_rsp;
+ struct fc_tlv_desc *tlv;
struct lpfc_iocbq *elsiocb;
IOCB_t *icmd, *cmd;
+ union lpfc_wqe128 *wqe;
+ u32 cgn_desc_size, lft_desc_size;
+ u16 cmdsize;
uint8_t *pcmd;
- int cmdsize, rc;
+ int rc;
- cmdsize = sizeof(struct lpfc_els_edc_rsp);
+ cmdsize = sizeof(struct fc_els_edc_resp);
+ cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
+ lft_desc_size = (lpfc_link_is_lds_capable(phba)) ?
+ sizeof(struct fc_diag_lnkflt_desc) : 0;
+ cmdsize += cgn_desc_size + lft_desc_size;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry,
ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- cmd = &cmdiocb->iocb;
- icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
- pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, cmdiocb)); /* Xri / rx_id */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, cmdiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ cmd = &cmdiocb->iocb;
+ icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id;
+ }
+
+ pcmd = elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
- edc_rsp = (struct lpfc_els_edc_rsp *)pcmd;
- edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC;
- edc_rsp->edc_rsp.desc_list_len = cpu_to_be32(
- FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp));
- edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
- edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32(
+ edc_rsp = (struct fc_els_edc_resp *)pcmd;
+ edc_rsp->acc_hdr.la_cmd = ELS_LS_ACC;
+ edc_rsp->desc_list_len = cpu_to_be32(sizeof(struct fc_els_lsri_desc) +
+ cgn_desc_size + lft_desc_size);
+ edc_rsp->lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO);
+ edc_rsp->lsri.desc_len = cpu_to_be32(
FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc));
- edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC;
- lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc);
+ edc_rsp->lsri.rqst_w0.cmd = ELS_EDC;
+ tlv = edc_rsp->desc;
+ lpfc_format_edc_cgn_desc(phba, tlv);
+ tlv = fc_tlv_next_desc(tlv);
+ if (lft_desc_size)
+ lpfc_format_edc_lft_desc(phba, tlv);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue EDC ACC: did:x%x flg:x%x refcnt %d",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
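
[Note: lpfc_issue_els_edc_rsp() now sizes the payload from the descriptors it will carry (LS request info, congestion signaling, and optionally a link-fault descriptor when the link is LDS-capable) and steps between them with fc_tlv_next_desc(). The walk relies on each descriptor advertising its own payload length after an 8-byte tag/length header; a hedged sketch with hypothetical type names:

    #include <stdint.h>
    #include <arpa/inet.h>      /* ntohl() stands in for be32_to_cpu() */

    /* Hypothetical shape; the driver uses struct fc_tlv_desc. */
    struct tlv_desc {
            uint32_t tag;       /* big-endian descriptor tag */
            uint32_t len;       /* big-endian payload length, header excluded */
            uint8_t  value[];
    };

    /* Next descriptor starts len bytes past this one's 8-byte header. */
    static struct tlv_desc *tlv_next_desc(struct tlv_desc *tlv)
    {
            return (struct tlv_desc *)((uint8_t *)tlv + sizeof(*tlv) +
                                       ntohl(tlv->len));
    }
]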
@@ -5726,7 +5917,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* and invokes the lpfc_sli_issue_iocb() routine to send out the command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the ADISC Accept response
* ELS IOCB command.
*
@@ -5741,10 +5932,12 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
struct lpfc_hba *phba = vport->phba;
ADISC *ap;
IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
cmdsize = sizeof(uint32_t) + sizeof(ADISC);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -5752,19 +5945,32 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* XRI / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb));
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ /* oxid */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0130 Xmit ADISC ACC response iotag x%x xri: "
"x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -5780,9 +5986,9 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -5794,14 +6000,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
return 1;
}
- /* Xmit ELS ACC response tag <ulpIoTag> */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
- "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
- "RPI: x%x, fc_flag x%x\n",
- rc, elsiocb->iotag, elsiocb->sli4_xritag,
- ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
- ndlp->nlp_rpi, vport->fc_flag);
return 0;
}
@@ -5816,7 +6014,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
* and invokes the lpfc_sli_issue_iocb() routine to send out the command.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the PRLI Accept response
* ELS IOCB command.
*
@@ -5834,18 +6032,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
lpfc_vpd_t *vpd;
IOCB_t *icmd;
IOCB_t *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
uint32_t prli_fc4_req, *req_payload;
struct lpfc_dmabuf *req_buf;
int rc;
- u32 elsrspcmd;
+ u32 elsrspcmd, ulp_context;
/* Need the incoming PRLI payload to determine if the ACC is for an
* FC4 or NVME PRLI type. The PRLI type is at word 1.
*/
- req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
+ req_buf = oldiocb->cmd_dmabuf;
req_payload = (((uint32_t *)req_buf->virt) + 1);
/* PRLI type payload is at byte 3 for FCP or NVME. */
@@ -5858,7 +6057,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
if (prli_fc4_req == PRLI_FCP_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
} else {
@@ -5866,23 +6065,34 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
}
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
- ndlp->nlp_DID, elsrspcmd);
+ ndlp->nlp_DID, elsrspcmd);
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0131 Xmit PRLI ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, cmdsize);
*((uint32_t *)(pcmd)) = elsrspcmd;
@@ -5910,7 +6120,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr->ConfmComplAllowed = 1;
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
- } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ } else if (prli_fc4_req == PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
@@ -5954,9 +6164,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -5984,7 +6194,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
* issue the response.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function.
*
* Return code
@@ -5998,10 +6208,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
struct lpfc_hba *phba = vport->phba;
RNID *rn;
IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
+ (2 * sizeof(struct lpfc_name));
@@ -6013,16 +6225,27 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0132 Xmit RNID ACC response tag x%x xri x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ elsiocb->iotag, ulp_context);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -6055,9 +6278,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -6092,7 +6315,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
struct lpfc_node_rrq *prrq;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
+ pcmd = (uint8_t *)iocb->cmd_dmabuf->virt;
pcmd += sizeof(uint32_t);
rrq = (struct RRQ *)pcmd;
rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
@@ -6104,7 +6327,8 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
be32_to_cpu(bf_get(rrq_did, rrq)),
bf_get(rrq_oxid, rrq),
rxid,
- iocb->iotag, iocb->iocb.ulpContext);
+ get_wqe_reqtag(iocb),
+ get_job_ulpcontext(phba, iocb));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
@@ -6135,12 +6359,18 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
+ IOCB_t *icmd, *oldcmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
int rc;
+ u32 ulp_context;
- cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ cmdsize = oldiocb->wcqe_cmpl.total_data_placed;
+ else
+ cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
/* The accumulated length can exceed the BPL_SIZE. For
* now, use this as the limit
@@ -6152,14 +6382,27 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
if (!elsiocb)
return 1;
- elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
- elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb)); /* Xri / rx_id */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ oldcmd = &oldiocb->iocb;
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ ulp_context = elsiocb->iocb.ulpContext;
+ icmd->unsli3.rcvsli3.ox_id =
+ oldcmd->unsli3.rcvsli3.ox_id;
+ }
/* Xmit ECHO ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2876 Xmit ECHO ACC response tag x%x xri x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ elsiocb->iotag, ulp_context);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
@@ -6169,9 +6412,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
phba->fc_stat.elsXmitACC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -6754,12 +6997,14 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
struct lpfc_iocbq *elsiocb;
struct ulp_bde64 *bpl;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
struct ls_rjt *stat;
struct fc_rdp_res_frame *rdp_res;
uint32_t cmdsize, len;
uint16_t *flag_ptr;
int rc;
+ u32 ulp_context;
if (status != SUCCESS)
goto error;
@@ -6768,24 +7013,33 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
cmdsize = sizeof(struct fc_rdp_res_frame);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
- lpfc_max_els_tries, rdp_context->ndlp,
- rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
+ lpfc_max_els_tries, rdp_context->ndlp,
+ rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
if (!elsiocb)
goto free_rdp_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rdp_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->ox_id);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->rx_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ }
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2171 Xmit RDP response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- rdp_res = (struct fc_rdp_res_frame *)
- (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ rdp_res = (struct fc_rdp_res_frame *)elsiocb->cmd_dmabuf->virt;
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -6833,18 +7087,17 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
rdp_context->page_a0, vport);
rdp_res->length = cpu_to_be32(len - 8);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
/* Now that we know the true size of the payload, update the BPL */
- bpl = (struct ulp_bde64 *)
- (((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
+ bpl = (struct ulp_bde64 *)elsiocb->bpl_dmabuf->virt;
bpl->tus.f.bdeSize = len;
bpl->tus.f.bdeFlags = 0;
bpl->tus.w = le32_to_cpu(bpl->tus.w);
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto free_rdp_context;
}
@@ -6864,19 +7117,30 @@ error:
if (!elsiocb)
goto free_rdp_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rdp_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->ox_id);
+ bf_set(wqe_ctxt_tag,
+ &wqe->xmit_els_rsp.wqe_com,
+ rdp_context->rx_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rdp_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto free_rdp_context;
}
@@ -6889,7 +7153,7 @@ error:
free_rdp_context:
/* This reference put is for the original unsolicited RDP. If the
- * iocb prep failed, there is no reference to remove.
+ * prep failed, there is no reference to remove.
*/
lpfc_nlp_put(ndlp);
kfree(rdp_context);
@@ -6909,18 +7173,19 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
}
if (lpfc_sli4_dump_page_a0(phba, mbox))
- goto prep_mbox_fail;
+ goto rdp_fail;
mbox->vport = rdp_context->ndlp->vport;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
- goto issue_mbox_fail;
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ return 1;
+ }
return 0;
-prep_mbox_fail:
-issue_mbox_fail:
+rdp_fail:
mempool_free(mbox, phba->mbox_mem_pool);
return 1;
}
@@ -6951,7 +7216,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
struct fc_rdp_req_frame *rdp_req;
struct lpfc_rdp_context *rdp_context;
- IOCB_t *cmd = NULL;
+ union lpfc_wqe128 *cmd = NULL;
struct ls_rjt stat;
if (phba->sli_rev < LPFC_SLI_REV4 ||
@@ -6968,7 +7233,7 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
goto error;
}
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -6993,15 +7258,17 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
goto error;
}
- cmd = &cmdiocb->iocb;
+ cmd = &cmdiocb->wqe;
rdp_context->ndlp = lpfc_nlp_get(ndlp);
if (!rdp_context->ndlp) {
kfree(rdp_context);
rjt_err = LSRJT_UNABLE_TPC;
goto error;
}
- rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
- rdp_context->rx_id = cmd->ulpContext;
+ rdp_context->ox_id = bf_get(wqe_rcvoxid,
+ &cmd->xmit_els_rsp.wqe_com);
+ rdp_context->rx_id = bf_get(wqe_ctxt_tag,
+ &cmd->xmit_els_rsp.wqe_com);
rdp_context->cmpl = lpfc_els_rdp_cmpl;
if (lpfc_get_rdp_info(phba, rdp_context)) {
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
@@ -7031,6 +7298,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
@@ -7077,26 +7345,33 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!elsiocb)
goto free_lcb_context;
- lcb_res = (struct fc_lcb_res_frame *)
- (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ lcb_res = (struct fc_lcb_res_frame *)elsiocb->cmd_dmabuf->virt;
memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
- icmd = &elsiocb->iocb;
- icmd->ulpContext = lcb_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
- pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ lcb_context->ox_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
lcb_res->lcb_sub_command = lcb_context->sub_command;
lcb_res->lcb_type = lcb_context->type;
lcb_res->capability = lcb_context->capability;
lcb_res->lcb_frequency = lcb_context->frequency;
lcb_res->lcb_duration = lcb_context->duration;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto out;
}
@@ -7113,16 +7388,24 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
error:
cmdsize = sizeof(struct fc_lcb_res_frame);
elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
- lpfc_max_els_tries, ndlp,
- ndlp->nlp_DID, ELS_CMD_LS_RJT);
+ lpfc_max_els_tries, ndlp,
+ ndlp->nlp_DID, ELS_CMD_LS_RJT);
lpfc_nlp_put(ndlp);
if (!elsiocb)
goto free_lcb_context;
- icmd = &elsiocb->iocb;
- icmd->ulpContext = lcb_context->rx_id;
- icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
- pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, lcb_context->rx_id);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ lcb_context->ox_id);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = lcb_context->rx_id;
+ icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
+ }
+
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
@@ -7131,10 +7414,10 @@ error:
if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto free_lcb_context;
}
@@ -7246,7 +7529,7 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
u8 state, rjt_err = 0;
struct ls_rjt stat;
- pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint8_t *)pcmd->virt;
beacon = (struct fc_lcb_request_frame *)pcmd->virt;
@@ -7286,8 +7569,8 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lcb_context->type = beacon->lcb_type;
lcb_context->frequency = beacon->lcb_frequency;
lcb_context->duration = beacon->lcb_duration;
- lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
- lcb_context->rx_id = cmdiocb->iocb.ulpContext;
+ lcb_context->ox_id = get_job_rcvoxid(phba, cmdiocb);
+ lcb_context->rx_id = get_job_ulpcontext(phba, cmdiocb);
lcb_context->ndlp = lpfc_nlp_get(ndlp);
if (!lcb_context->ndlp) {
rjt_err = LSRJT_UNABLE_TPC;
@@ -7443,10 +7726,10 @@ return_did_out:
static int
lpfc_rscn_recovery_check(struct lpfc_vport *vport)
{
- struct lpfc_nodelist *ndlp = NULL;
+ struct lpfc_nodelist *ndlp = NULL, *n;
/* Move all affected nodes by pending RSCNs to NPR state. */
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ list_for_each_entry_safe(ndlp, n, &vport->fc_nodes, nlp_listp) {
if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
!lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
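
[Note: lpfc_rscn_recovery_check() switches to list_for_each_entry_safe() because the loop body can remove ndlps from vport->fc_nodes, and the safe variant caches the next entry before the current one may be unlinked. The same idea with a plain C singly linked list:

    #include <stdlib.h>

    struct node { int did; struct node *next; };

    /* Remove matching nodes while walking: cache 'next' first, which is
     * exactly the guarantee list_for_each_entry_safe() provides. */
    static void prune(struct node **head, int did)
    {
            struct node **pp = head, *n, *next;

            for (n = *head; n; n = next) {
                    next = n->next;     /* still valid after n is freed */
                    if (n->did == did) {
                            *pp = next;
                            free(n);
                            continue;
                    }
                    pp = &n->next;
            }
    }
]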
@@ -7492,7 +7775,7 @@ lpfc_send_rscn_event(struct lpfc_vport *vport,
uint32_t payload_len;
struct lpfc_rscn_event_header *rscn_event_data;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
payload_ptr = (uint32_t *) pcmd->virt;
payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
@@ -7552,7 +7835,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
int rscn_id = 0, hba_id = 0;
int i, tmo;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
@@ -7628,6 +7911,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
ndlp, NULL);
+ /* Restart disctmo if it's already running */
+ if (vport->fc_flag & FC_DISC_TMO) {
+ tmo = ((phba->fc_ratov * 3) + 3);
+ mod_timer(&vport->fc_disctmo,
+ jiffies +
+ msecs_to_jiffies(1000 * tmo));
+ }
return 0;
}
}
@@ -7647,7 +7937,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Get the array count after successfully have the token */
rscn_cnt = vport->fc_rscn_id_cnt;
/* If we are already processing an RSCN, save the received
- * RSCN payload buffer, cmdiocb->context2 to process later.
+ * RSCN payload buffer, cmdiocb->cmd_dmabuf to process later.
*/
if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -7680,10 +7970,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
} else {
vport->fc_rscn_id_list[rscn_cnt] = pcmd;
vport->fc_rscn_id_cnt++;
- /* If we zero, cmdiocb->context2, the calling
+ /* If we zero cmdiocb->cmd_dmabuf, the calling
* routine will not try to free it.
*/
- cmdiocb->context2 = NULL;
+ cmdiocb->cmd_dmabuf = NULL;
}
/* Deferred RSCN */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
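
[Note: when a deferred RSCN's payload is parked on vport->fc_rscn_id_list, cmdiocb->cmd_dmabuf is set to NULL so the generic ELS completion path no longer owns the buffer and will not free it. A minimal sketch of that hand-off, with hypothetical names:

    #include <stddef.h>

    /* Hypothetical stand-ins for lpfc_iocbq and lpfc_dmabuf. */
    struct buf { void *virt; };
    struct request { struct buf *payload; };

    /* Save the payload elsewhere and clear the source pointer so the
     * request's normal free path skips the buffer. */
    static struct buf *steal_payload(struct request *req)
    {
            struct buf *b = req->payload;

            req->payload = NULL;
            return b;       /* the new owner must free it eventually */
    }
]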
@@ -7720,10 +8010,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Indicate we are done walking fc_rscn_id_list on this vport */
vport->fc_rscn_flush = 0;
/*
- * If we zero, cmdiocb->context2, the calling routine will
+ * If we zero cmdiocb->cmd_dmabuf, the calling routine will
* not try to free it.
*/
- cmdiocb->context2 = NULL;
+ cmdiocb->cmd_dmabuf = NULL;
lpfc_set_disctmo(vport);
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
@@ -7847,9 +8137,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *lp = (uint32_t *) pcmd->virt;
- IOCB_t *icmd = &cmdiocb->iocb;
+ union lpfc_wqe128 *wqe = &cmdiocb->wqe;
struct serv_parm *sp;
LPFC_MBOXQ_t *mbox;
uint32_t cmd, did;
@@ -7857,6 +8147,9 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t fc_flag = 0;
uint32_t port_state = 0;
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
cmd = *lp++;
sp = (struct serv_parm *) lp;
@@ -7866,7 +8159,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* We should never receive a FLOGI in loop mode, ignore it */
- did = icmd->un.elsreq64.remoteID;
+ did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
/* An FLOGI ELS command <elsCmd> was received from DID <did> in
Loop Mode */
@@ -7908,6 +8201,12 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
}
+ /* External loopback plug insertion detected */
+ phba->link_flag |= LS_EXTERNAL_LOOPBACK;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_LIBDFC,
+ "1119 External Loopback plug detected\n");
+
/* abort the flogi coming back to ourselves
* due to external loopback on the port.
*/
@@ -7962,9 +8261,10 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
/* Defer ACC response until AFTER we issue a FLOGI */
if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
- phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
- phba->defer_flogi_acc_ox_id =
- cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ phba->defer_flogi_acc_rx_id = bf_get(wqe_ctxt_tag,
+ &wqe->xmit_els_rsp.wqe_com);
+ phba->defer_flogi_acc_ox_id = bf_get(wqe_rcvoxid,
+ &wqe->xmit_els_rsp.wqe_com);
vport->fc_myDID = did;
@@ -8013,7 +8313,7 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
RNID *rn;
struct ls_rjt stat;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
lp++;
@@ -8054,7 +8354,7 @@ lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
uint8_t *pcmd;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+ pcmd = (uint8_t *)cmdiocb->cmd_dmabuf->virt;
/* skip over first word of echo command to find echo data */
pcmd += sizeof(uint32_t);
@@ -8130,7 +8430,7 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* response to the RLS.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RLS Accept Response
* ELS IOCB command.
*
@@ -8141,6 +8441,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
int rc = 0;
MAILBOX_t *mb;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
struct RLS_RSP *rls_rsp;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
@@ -8148,10 +8449,11 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t oxid;
uint16_t rxid;
uint32_t cmdsize;
+ u32 ulp_context;
mb = &pmb->u.mb;
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ ndlp = pmb->ctx_ndlp;
rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
pmb->ctx_buf = NULL;
@@ -8175,11 +8477,19 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
- icmd = &elsiocb->iocb;
- icmd->ulpContext = rxid;
- icmd->unsli3.rcvsli3.ox_id = oxid;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* Xri / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, rxid);
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, oxid);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
+ }
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
rls_rsp = (struct RLS_RSP *)pcmd;
@@ -8195,13 +8505,13 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
"2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return;
}
@@ -8239,6 +8549,8 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mbox;
struct ls_rjt stat;
+ u32 ctx = get_job_ulpcontext(phba, cmdiocb);
+ u32 ox_id = get_job_rcvoxid(phba, cmdiocb);
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
@@ -8249,8 +8561,7 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
mbox->ctx_buf = (void *)((unsigned long)
- ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
- cmdiocb->iocb.ulpContext)); /* rx_id */
+ (ox_id << 16 | ctx));
mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
if (!mbox->ctx_ndlp)
goto node_err;
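
[Note: lpfc_els_rcv_rls() has no dedicated context fields for the exchange IDs, so it packs ox_id and rx_id into the pointer-sized mbox->ctx_buf slot; lpfc_els_rsp_rls_acc() above unpacks them with the matching shifts. A self-contained sketch of the packing:

    #include <stdint.h>

    static void *pack_ids(uint16_t ox_id, uint16_t rx_id)
    {
            return (void *)(unsigned long)(((uint32_t)ox_id << 16) | rx_id);
    }

    static void unpack_ids(void *ctx, uint16_t *ox_id, uint16_t *rx_id)
    {
            unsigned long v = (unsigned long)ctx;

            *rx_id = v & 0xffff;
            *ox_id = (v >> 16) & 0xffff;
    }
]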
@@ -8291,7 +8602,7 @@ reject_out:
* Value (RTV) unsolicited IOCB event.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RTV Accept Response
* ELS IOCB command.
*
@@ -8303,13 +8614,15 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp)
{
int rc = 0;
+ IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
struct lpfc_hba *phba = vport->phba;
struct ls_rjt stat;
struct RTV_RSP *rtv_rsp;
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
uint32_t cmdsize;
-
+ u32 ulp_context;
if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))
@@ -8324,13 +8637,23 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
/* use the command's xri in the response */
- elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
- elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, cmdiocb));
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, cmdiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = get_job_ulpcontext(phba, cmdiocb);
+ icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, cmdiocb);
+ }
rtv_rsp = (struct RTV_RSP *)pcmd;
@@ -8346,14 +8669,14 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
"2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
"Data: x%x x%x x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi,
rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 0;
}
@@ -8409,7 +8732,7 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
/* For RRQ request, remainder of payload is Exchange IDs */
*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
@@ -8427,19 +8750,21 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue RRQ: did:x%x",
did, rrq->xritag, rrq->rxid);
elsiocb->context_un.rrq = rrq;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rrq;
- lpfc_nlp_get(ndlp);
- elsiocb->context1 = ndlp;
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
+ goto io_err;
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
- if (ret == IOCB_ERROR)
+ if (ret == IOCB_ERROR) {
+ lpfc_nlp_put(ndlp);
goto io_err;
+ }
return 0;
io_err:
lpfc_els_free_iocb(phba, elsiocb);
- lpfc_nlp_put(ndlp);
return 1;
}
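
[Note: the lpfc_issue_els_rrq() change also rebalances the reference counting: the node reference destined for the completion handler is taken first, and put back explicitly if lpfc_sli_issue_iocb() fails, so a failed submit no longer leaks a reference. A hedged sketch of the pattern with a toy refcount in place of the kernel's kref:

    #include <stdlib.h>

    struct node { int refs; };

    static struct node *node_get(struct node *n)
    {
            if (n)
                    n->refs++;
            return n;
    }

    static void node_put(struct node *n)
    {
            if (n && --n->refs == 0)
                    free(n);
    }

    static int submit(struct node *n)   /* pretend hardware submit */
    {
            (void)n;
            return -1;      /* force the failure path for illustration */
    }

    static int issue(struct node *n)
    {
            struct node *held = node_get(n);    /* ref for the completion */

            if (!held)
                    return 1;
            if (submit(held) < 0) {
                    node_put(held);     /* undo only our own reference */
                    return 1;
            }
            return 0;       /* the completion handler drops the ref later */
    }
]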
@@ -8481,7 +8806,7 @@ lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
* It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the RPL Accept Response
* ELS command.
*
@@ -8495,10 +8820,12 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
{
int rc = 0;
struct lpfc_hba *phba = vport->phba;
- IOCB_t *icmd, *oldcmd;
+ IOCB_t *icmd;
+ union lpfc_wqe128 *wqe;
RPL_RSP rpl_rsp;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
+ u32 ulp_context;
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
ndlp->nlp_DID, ELS_CMD_ACC);
@@ -8506,12 +8833,21 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
if (!elsiocb)
return 1;
- icmd = &elsiocb->iocb;
- oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
- icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+ ulp_context = get_job_ulpcontext(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ /* Xri / rx_id */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ get_job_ulpcontext(phba, oldiocb));
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ get_job_rcvoxid(phba, oldiocb));
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->ulpContext = get_job_ulpcontext(phba, oldiocb);
+ icmd->unsli3.rcvsli3.ox_id = get_job_rcvoxid(phba, oldiocb);
+ }
- pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint16_t);
*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
@@ -8530,13 +8866,13 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
"0120 Xmit ELS RPL ACC response tag x%x "
"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
"rpi x%x\n",
- elsiocb->iotag, elsiocb->iocb.ulpContext,
+ elsiocb->iotag, ulp_context,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
@@ -8591,7 +8927,7 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 0;
}
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
rpl = (RPL *) (lp + 1);
maxsize = be32_to_cpu(rpl->maxsize);
@@ -8639,13 +8975,11 @@ lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
FARP *fp;
uint32_t cnt, did;
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ did = get_job_els_rsp64_did(vport->phba, cmdiocb);
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
lp++;
@@ -8712,13 +9046,11 @@ lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
- IOCB_t *icmd;
uint32_t did;
- icmd = &cmdiocb->iocb;
- did = icmd->un.elsreq64.remoteID;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
+ did = get_job_els_rsp64_did(vport->phba, cmdiocb);
+ pcmd = cmdiocb->cmd_dmabuf;
+ lp = (uint32_t *)pcmd->virt;
lp++;
/* FARP-RSP received from DID <did> */
@@ -8758,7 +9090,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
FAN *fp;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
- lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ lp = (uint32_t *)cmdiocb->cmd_dmabuf->virt;
fp = (FAN *) ++lp;
/* FAN received; Fan does not have a reply sequence */
if ((vport == phba->pport) &&
@@ -8805,15 +9137,16 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
uint32_t *ptr, dtag;
const char *dtag_nm;
int desc_cnt = 0, bytes_remain;
- bool rcv_cap_desc = false;
+ struct fc_diag_lnkflt_desc *plnkflt;
- payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ payload = cmdiocb->cmd_dmabuf->virt;
edc_req = (struct fc_els_edc *)payload;
bytes_remain = be32_to_cpu(edc_req->desc_len);
ptr = (uint32_t *)payload;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"3319 Rcv EDC payload len %d: x%x x%x x%x\n",
bytes_remain, be32_to_cpu(*ptr),
be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2)));
@@ -8832,9 +9165,10 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
* cycle through EDC diagnostic descriptors to find the
* congestion signaling capability descriptor
*/
- while (bytes_remain && !rcv_cap_desc) {
+ while (bytes_remain) {
if (bytes_remain < FC_TLV_DESC_HDR_SZ) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6464 Truncated TLV hdr on "
"Diagnostic descriptor[%d]\n",
desc_cnt);
@@ -8847,16 +9181,27 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
FC_TLV_DESC_SZ_FROM_LENGTH(tlv) !=
sizeof(struct fc_diag_lnkflt_desc)) {
- lpfc_printf_log(
- phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6465 Truncated Link Fault Diagnostic "
"descriptor[%d]: %d vs 0x%zx 0x%zx\n",
desc_cnt, bytes_remain,
FC_TLV_DESC_SZ_FROM_LENGTH(tlv),
- sizeof(struct fc_diag_cg_sig_desc));
+ sizeof(struct fc_diag_lnkflt_desc));
goto out;
}
- /* No action for Link Fault descriptor for now */
+ plnkflt = (struct fc_diag_lnkflt_desc *)tlv;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_ELS | LOG_LDS_EVENT,
+ "4626 Link Fault Desc Data: x%08x len x%x "
+ "da x%x dd x%x interval x%x\n",
+ be32_to_cpu(plnkflt->desc_tag),
+ be32_to_cpu(plnkflt->desc_len),
+ be32_to_cpu(
+ plnkflt->degrade_activate_threshold),
+ be32_to_cpu(
+ plnkflt->degrade_deactivate_threshold),
+ be32_to_cpu(plnkflt->fec_degrade_interval));
break;
case ELS_DTAG_CG_SIGNAL_CAP:
if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) ||
@@ -8883,11 +9228,11 @@ lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_least_capable_settings(
phba, (struct fc_diag_cg_sig_desc *)tlv);
- rcv_cap_desc = true;
break;
default:
dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
- lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
+ lpfc_printf_log(phba, KERN_WARNING,
+ LOG_ELS | LOG_CGN_MGMT | LOG_LDS_EVENT,
"6467 unknown Diagnostic "
"Descriptor[%d]: tag x%x (%s)\n",
desc_cnt, dtag, dtag_nm);
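
[Note: with rcv_cap_desc gone, lpfc_els_rcv_edc() keeps walking until bytes_remain is exhausted, validating each descriptor's header and advertised length before dispatching on its tag. A compact sketch of such a bounds-checked walk, reusing the tlv_desc shape from the earlier sketch (handle_desc() is a stand-in for the per-tag cases):

    #include <stdint.h>
    #include <stddef.h>

    static void handle_desc(uint32_t tag, const uint8_t *val, size_t len)
    {
            (void)tag; (void)val; (void)len;    /* per-tag cases go here */
    }

    static void walk_descs(struct tlv_desc *tlv, size_t bytes_remain)
    {
            while (bytes_remain >= sizeof(*tlv)) {
                    size_t len = ntohl(tlv->len);

                    if (bytes_remain < sizeof(*tlv) + len)
                            break;      /* truncated descriptor: stop parsing */
                    handle_desc(ntohl(tlv->tag), tlv->value, len);
                    bytes_remain -= sizeof(*tlv) + len;
                    tlv = tlv_next_desc(tlv);
            }
    }
]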
@@ -8955,6 +9300,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
uint32_t timeout;
uint32_t remote_ID = 0xffffffff;
LIST_HEAD(abort_list);
+ u32 ulp_command = 0, ulp_context = 0, did = 0, iotag = 0;
timeout = (uint32_t)(phba->fc_ratov << 1);
@@ -8971,17 +9317,27 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- cmd = &piocb->iocb;
+ ulp_command = get_job_cmnd(phba, piocb);
+ ulp_context = get_job_ulpcontext(phba, piocb);
+ did = get_job_els_rsp64_did(phba, piocb);
- if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
- piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(piocb);
+ } else {
+ cmd = &piocb->iocb;
+ iotag = cmd->ulpIoTag;
+ }
+
+ if ((piocb->cmd_flag & LPFC_IO_LIBDFC) != 0 ||
+ ulp_command == CMD_ABORT_XRI_CX ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN)
continue;
if (piocb->vport != vport)
continue;
- pcmd = (struct lpfc_dmabuf *) piocb->context2;
+ pcmd = piocb->cmd_dmabuf;
if (pcmd)
els_command = *(uint32_t *) (pcmd->virt);
@@ -8999,11 +9355,11 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
}
remote_ID = 0xffffffff;
- if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
- remote_ID = cmd->un.elsreq64.remoteID;
- else {
+ if (ulp_command != CMD_GEN_REQUEST64_CR) {
+ remote_ID = did;
+ } else {
struct lpfc_nodelist *ndlp;
- ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+ ndlp = __lpfc_findnode_rpi(vport, ulp_context);
if (ndlp)
remote_ID = ndlp->nlp_DID;
}
@@ -9014,11 +9370,11 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
- cmd = &piocb->iocb;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0127 ELS timeout Data: x%x x%x x%x "
"x%x\n", els_command,
- remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
+ remote_ID, ulp_command, iotag);
+
spin_lock_irq(&phba->hbalock);
list_del_init(&piocb->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
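
[Note: the timeout handler now pulls the command, context, DID, and iotag through the get_job_*()/get_wqe_reqtag() helpers instead of dereferencing piocb->iocb directly, since on SLI-4 those values live in the WQE and completion CQE rather than the IOCB. A hedged sketch of the accessor shape; the real helpers take a phba argument and use bf_get() on the WQE:

    #include <stdint.h>

    enum sli_rev { SLI_REV3, SLI_REV4 };

    /* Toy layout: SLI-3 keeps fields in the IOCB, SLI-4 in the WQE. */
    struct job {
            enum sli_rev rev;
            union {
                    struct { uint32_t ulp_command; uint32_t ulp_io_tag; } iocb;
                    struct { uint32_t cmnd; uint32_t reqtag; } wqe;
            } u;
    };

    static uint32_t get_job_cmnd(const struct job *j)
    {
            return j->rev == SLI_REV4 ? j->u.wqe.cmnd : j->u.iocb.ulp_command;
    }

    static uint32_t get_job_iotag(const struct job *j)
    {
            return j->rev == SLI_REV4 ? j->u.wqe.reqtag : j->u.iocb.ulp_io_tag;
    }
]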
@@ -9061,7 +9417,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
- IOCB_t *cmd = NULL;
+ u32 ulp_command;
unsigned long iflags = 0;
lpfc_fabric_abort_vport(vport);
@@ -9086,20 +9442,20 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->iocb_flag & LPFC_IO_LIBDFC)
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
if (piocb->vport != vport)
continue;
- if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
continue;
/* On the ELS ring we can have ELS_REQUESTs or
* GEN_REQUESTs waiting for a response.
*/
- cmd = &piocb->iocb;
- if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+ ulp_command = get_job_cmnd(phba, piocb);
+ if (ulp_command == CMD_ELS_REQUEST64_CR) {
list_add_tail(&piocb->dlist, &abort_list);
/* If the link is down when flushing ELS commands
@@ -9110,9 +9466,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* and avoid any retry logic.
*/
if (phba->link_state == LPFC_LINK_DOWN)
- piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+ piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
}
- if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+ if (ulp_command == CMD_GEN_REQUEST64_CR)
list_add_tail(&piocb->dlist, &abort_list);
}
@@ -9143,17 +9499,17 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* just queue them up for lpfc_sli_cancel_iocbs
*/
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
- cmd = &piocb->iocb;
+ ulp_command = get_job_cmnd(phba, piocb);
- if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
- }
/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
- if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
- cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
- cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmd->ulpCommand == CMD_ABORT_XRI_CN)
+ if (ulp_command == CMD_QUE_RING_BUF_CN ||
+ ulp_command == CMD_QUE_RING_BUF64_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_CX)
continue;
if (piocb->vport != vport)
@@ -9167,7 +9523,6 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (vport == phba->pport) {
list_for_each_entry_safe(piocb, tmp_iocb,
&phba->fabric_iocb_list, list) {
- cmd = &piocb->iocb;
list_del_init(&piocb->list);
list_add_tail(&piocb->list, &abort_list);
}
@@ -9235,22 +9590,25 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
struct ls_rjt stat;
struct lpfc_nodelist *ndlp;
uint32_t *pcmd;
+ u32 ulp_status, ulp_word4;
- ndlp = cmdiocbp->context1;
+ ndlp = cmdiocbp->ndlp;
if (!ndlp)
return;
- if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
+ ulp_status = get_job_ulpstatus(phba, rspiocbp);
+ ulp_word4 = get_job_word4(phba, rspiocbp);
+
+ if (ulp_status == IOSTAT_LS_RJT) {
lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
sizeof(struct lpfc_name));
memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
sizeof(struct lpfc_name));
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- cmdiocbp->context2)->virt);
+ pcmd = (uint32_t *)cmdiocbp->cmd_dmabuf->virt;
lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
- stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
+ stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
fc_host_post_vendor_event(shost,
@@ -9260,10 +9618,10 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba,
LPFC_NL_VENDOR_ID);
return;
}
- if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
- (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
+ if (ulp_status == IOSTAT_NPORT_BSY ||
+ ulp_status == IOSTAT_FABRIC_BSY) {
fabric_event.event_type = FC_REG_FABRIC_EVENT;
- if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
+ if (ulp_status == IOSTAT_NPORT_BSY)
fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
else
fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
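
With the raw ulpWord[4] access gone, the LS_RJT decode runs through the big-endian overlay in struct ls_rjt. A condensed sketch of that decode step, using the union members shown in the hunk above:

```c
/* Decode an LS_RJT reason/explanation from a completion's word4 via
 * the big-endian overlay; member names follow the hunk above.
 */
static void example_decode_ls_rjt(u32 ulp_word4)
{
	struct ls_rjt stat;

	stat.un.ls_rjt_error_be = cpu_to_be32(ulp_word4);
	pr_info("LS_RJT reason x%x explanation x%x\n",
		stat.un.b.lsRjtRsnCode, stat.un.b.lsRjtRsnCodeExp);
}
```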
@@ -9589,11 +9947,14 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
/* Take action here for an Alarm event */
if (phba->cmf_active_mode != LPFC_CFG_OFF) {
if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) {
- /* Track of alarm cnt for cgn_info */
- atomic_inc(&phba->cgn_fabric_alarm_cnt);
/* Track of alarm cnt for SYNC_WQE */
atomic_inc(&phba->cgn_sync_alarm_cnt);
}
+ /* Track alarm cnt for cgn_info regardless
+ * of whether CMF is configured for Signals
+ * or FPINs.
+ */
+ atomic_inc(&phba->cgn_fabric_alarm_cnt);
goto cleanup;
}
break;
@@ -9601,11 +9962,14 @@ lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv)
/* Take action here for a Warning event */
if (phba->cmf_active_mode != LPFC_CFG_OFF) {
if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) {
- /* Track of warning cnt for cgn_info */
- atomic_inc(&phba->cgn_fabric_warn_cnt);
/* Track of warning cnt for SYNC_WQE */
atomic_inc(&phba->cgn_sync_warn_cnt);
}
+ /* Track warning cnt and freq for cgn_info
+ * regardless of whether CMF is configured for
+ * Signals or FPINs.
+ */
+ atomic_inc(&phba->cgn_fabric_warn_cnt);
cleanup:
/* Save frequency in ms */
phba->cgn_fpin_frequency =
@@ -9614,14 +9978,10 @@ cleanup:
if (phba->cgn_i) {
cp = (struct lpfc_cgn_info *)
phba->cgn_i->virt;
- if (phba->cgn_reg_fpin &
- LPFC_CGN_FPIN_ALARM)
- cp->cgn_alarm_freq =
- cpu_to_le16(value);
- if (phba->cgn_reg_fpin &
- LPFC_CGN_FPIN_WARN)
- cp->cgn_warn_freq =
- cpu_to_le16(value);
+ cp->cgn_alarm_freq =
+ cpu_to_le16(value);
+ cp->cgn_warn_freq =
+ cpu_to_le16(value);
crc = lpfc_cgn_calc_crc32
(cp,
LPFC_CGN_INFO_SZ,
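
The net effect of the FPIN hunks above: the fabric alarm/warning counters (and the frequencies written into cgn_info) are now updated whenever CMF is active, while the SYNC_WQE counters stay gated on the FPIN registration bits. Paraphrased for the alarm path:

```c
/* Restructured accounting, paraphrased from the hunks above: the
 * sync counter remains conditional on FPIN alarm registration; the
 * fabric counter feeding cgn_info is bumped unconditionally.
 */
if (phba->cmf_active_mode != LPFC_CFG_OFF) {
	if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
		atomic_inc(&phba->cgn_sync_alarm_cnt);

	atomic_inc(&phba->cgn_fabric_alarm_cnt);
	/* then fall through to save the frequency and re-CRC cgn_info */
}
```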
@@ -9776,27 +10136,32 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
- uint32_t *payload, payload_len;
- uint32_t cmd, did, newnode;
+ u32 *payload, payload_len;
+ u32 cmd = 0, did = 0, newnode, status = 0;
uint8_t rjt_exp, rjt_err = 0, init_link = 0;
- IOCB_t *icmd = &elsiocb->iocb;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
LPFC_MBOXQ_t *mbox;
- if (!vport || !(elsiocb->context2))
+ if (!vport || !elsiocb->cmd_dmabuf)
goto dropit;
newnode = 0;
- payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
- payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
+ wcqe_cmpl = &elsiocb->wcqe_cmpl;
+ payload = elsiocb->cmd_dmabuf->virt;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ payload_len = wcqe_cmpl->total_data_placed;
+ else
+ payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
+ status = get_job_ulpstatus(phba, elsiocb);
cmd = *payload;
if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
- lpfc_post_buffer(phba, pring, 1);
+ lpfc_sli3_post_buffer(phba, pring, 1);
- did = icmd->un.rcvels.remoteID;
- if (icmd->ulpStatus) {
+ did = get_job_els_rsp64_did(phba, elsiocb);
+ if (status) {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV Unsol ELS: status:x%x/x%x did:x%x",
- icmd->ulpStatus, icmd->un.ulpWord[4], did);
+ status, get_job_word4(phba, elsiocb), did);
goto dropit;
}
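
Because SLI4 reports the received frame length in the WCQE rather than in the IOCB, the unsolicited-buffer parse now branches on sli_rev before trusting either field. A self-contained sketch of that extraction, using the names from the hunk:

```c
/* Length of an unsolicited ELS payload by SLI revision: SLI4 places
 * it in the completion WCQE, SLI3 in the IOCB's rcvsli3 words.
 */
static u32 example_unsol_payload_len(struct lpfc_hba *phba,
				     struct lpfc_iocbq *elsiocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return elsiocb->wcqe_cmpl.total_data_placed;

	return elsiocb->iocb.unsli3.rcvsli3.acc_len;
}
```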
@@ -9843,8 +10208,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1)
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
goto dropit;
elsiocb->vport = vport;
@@ -9882,7 +10247,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
* the vfi. This is done in lpfc_rcv_plogi but
* that is called after the reg_vfi.
*/
- vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+ vport->fc_myDID =
+ bf_get(els_rsp64_sid,
+ &elsiocb->wqe.xmit_els_rsp);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"3312 Remote port assigned DID x%x "
"%x\n", vport->fc_myDID,
@@ -9935,6 +10302,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
+ /* retain node if our response is deferred */
+ if (phba->defer_flogi_acc_flag)
+ break;
if (newnode)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
@@ -10195,8 +10565,8 @@ lsrjt:
}
/* Release the reference on this elsiocb, not the ndlp. */
- lpfc_nlp_put(elsiocb->context1);
- elsiocb->context1 = NULL;
+ lpfc_nlp_put(elsiocb->ndlp);
+ elsiocb->ndlp = NULL;
/* Special case. Driver received an unsolicited command that
* unsupportable given the driver's current state. Reset the
@@ -10224,8 +10594,9 @@ dropit:
if (vport && !(vport->load_flag & FC_UNLOADING))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0111 Dropping received ELS cmd "
- "Data: x%x x%x x%x\n",
- icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
+ "Data: x%x x%x x%x x%x\n",
+ cmd, status, get_job_word4(phba, elsiocb), did);
+
phba->fc_stat.elsRcvDrop++;
}
@@ -10245,77 +10616,93 @@ void
lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *elsiocb)
{
- struct lpfc_vport *vport = phba->pport;
- IOCB_t *icmd = &elsiocb->iocb;
+ struct lpfc_vport *vport = elsiocb->vport;
+ u32 ulp_command, status, parameter, bde_count = 0;
+ IOCB_t *icmd;
+ struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
+ struct lpfc_dmabuf *bdeBuf1 = elsiocb->cmd_dmabuf;
+ struct lpfc_dmabuf *bdeBuf2 = elsiocb->bpl_dmabuf;
dma_addr_t paddr;
- struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
- struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
- elsiocb->context1 = NULL;
- elsiocb->context2 = NULL;
- elsiocb->context3 = NULL;
+ elsiocb->cmd_dmabuf = NULL;
+ elsiocb->rsp_dmabuf = NULL;
+ elsiocb->bpl_dmabuf = NULL;
+
+ wcqe_cmpl = &elsiocb->wcqe_cmpl;
+ ulp_command = get_job_cmnd(phba, elsiocb);
+ status = get_job_ulpstatus(phba, elsiocb);
+ parameter = get_job_word4(phba, elsiocb);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ bde_count = wcqe_cmpl->word3;
+ else
+ bde_count = elsiocb->iocb.ulpBdeCount;
- if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
+ if (status == IOSTAT_NEED_BUFFER) {
lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
- } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ } else if (status == IOSTAT_LOCAL_REJECT &&
+ (parameter & IOERR_PARAM_MASK) ==
IOERR_RCV_BUFFER_WAITING) {
phba->fc_stat.NoRcvBuf++;
/* Not enough posted buffers; Try posting more buffers */
if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
- lpfc_post_buffer(phba, pring, 0);
+ lpfc_sli3_post_buffer(phba, pring, 0);
return;
}
- if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
- (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
- icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
- if (icmd->unsli3.rcvsli3.vpi == 0xffff)
- vport = phba->pport;
- else
- vport = lpfc_find_vport_by_vpid(phba,
+ if (phba->sli_rev == LPFC_SLI_REV3) {
+ icmd = &elsiocb->iocb;
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (ulp_command == CMD_IOCB_RCV_ELS64_CX ||
+ ulp_command == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (icmd->unsli3.rcvsli3.vpi == 0xffff)
+ vport = phba->pport;
+ else
+ vport = lpfc_find_vport_by_vpid(phba,
icmd->unsli3.rcvsli3.vpi);
+ }
}
/* If there are no BDEs associated
* with this IOCB, there is nothing to do.
*/
- if (icmd->ulpBdeCount == 0)
+ if (bde_count == 0)
return;
- /* type of ELS cmd is first 32bit word
- * in packet
- */
+ /* Account for SLI2 or SLI3 and later unsolicited buffering */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
- elsiocb->context2 = bdeBuf1;
+ elsiocb->cmd_dmabuf = bdeBuf1;
+ if (bde_count == 2)
+ elsiocb->bpl_dmabuf = bdeBuf2;
} else {
+ icmd = &elsiocb->iocb;
paddr = getPaddr(icmd->un.cont64[0].addrHigh,
icmd->un.cont64[0].addrLow);
- elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
- paddr);
+ elsiocb->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ if (bde_count == 2) {
+ paddr = getPaddr(icmd->un.cont64[1].addrHigh,
+ icmd->un.cont64[1].addrLow);
+ elsiocb->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ paddr);
+ }
}
lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
/*
* The different unsolicited event handlers would tell us
- * if they are done with "mp" by setting context2 to NULL.
+ * if they are done with "mp" by setting cmd_dmabuf to NULL.
*/
- if (elsiocb->context2) {
- lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
- elsiocb->context2 = NULL;
- }
-
- /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
- if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
- icmd->ulpBdeCount == 2) {
- elsiocb->context2 = bdeBuf2;
- lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
- /* free mp if we are done with it */
- if (elsiocb->context2) {
- lpfc_in_buf_free(phba, elsiocb->context2);
- elsiocb->context2 = NULL;
- }
+ if (elsiocb->cmd_dmabuf) {
+ lpfc_in_buf_free(phba, elsiocb->cmd_dmabuf);
+ elsiocb->cmd_dmabuf = NULL;
}
+
+ if (elsiocb->bpl_dmabuf) {
+ lpfc_in_buf_free(phba, elsiocb->bpl_dmabuf);
+ elsiocb->bpl_dmabuf = NULL;
+ }
+
}
static void
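
The rewritten lpfc_els_unsol_event() folds the old second-BDE special case into the main path: the BDE count comes from WCQE word3 on SLI4 (ulpBdeCount on SLI3), both buffers are attached up front, and each attached buffer is freed exactly once if the handler did not consume it. A sketch of the count extraction under those assumptions:

```c
/* BDE count for an unsolicited ELS event; on SLI4 the count is
 * carried in WCQE word3, on SLI3 in the IOCB's ulpBdeCount.
 */
static u32 example_unsol_bde_count(struct lpfc_hba *phba,
				   struct lpfc_iocbq *elsiocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return elsiocb->wcqe_cmpl.word3;

	return elsiocb->iocb.ulpBdeCount;
}
```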
@@ -10426,7 +10813,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
MAILBOX_t *mb = &pmb->u.mb;
int rc;
@@ -10655,9 +11042,11 @@ lpfc_fabric_login_reqd(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
- (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
+ if (ulp_status != IOSTAT_FABRIC_RJT ||
+ ulp_word4 != RJT_LOGIN_REQUIRED)
return 0;
else
return 1;
@@ -10689,18 +11078,21 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
- IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
- struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf, *prsp;
struct serv_parm *sp;
uint8_t fabric_param_changed;
+ u32 ulp_status, ulp_word4;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
+ ulp_status, ulp_word4,
vport->fc_prevDID);
/* Since all FDISCs are being single threaded, we
* must reset the discovery timer for ALL vports
@@ -10712,9 +11104,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"FDISC cmpl: status:x%x/x%x prevdid:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
+ ulp_status, ulp_word4, vport->fc_prevDID);
- if (irsp->ulpStatus) {
+ if (ulp_status) {
if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
lpfc_retry_pport_discovery(phba);
@@ -10727,7 +11119,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* FDISC failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0126 FDISC failed. (x%x/x%x)\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto fdisc_failed;
}
@@ -10741,7 +11133,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_flag |= FC_PUBLIC_LOOP;
spin_unlock_irq(shost->host_lock);
- vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
+ vport->fc_myDID = ulp_word4 & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
@@ -10828,7 +11220,7 @@ out:
* IOCB will be sent off HBA at any given time.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the FDISC ELS command.
*
* Return code
@@ -10841,6 +11233,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
IOCB_t *icmd;
+ union lpfc_wqe128 *wqe = NULL;
struct lpfc_iocbq *elsiocb;
struct serv_parm *sp;
uint8_t *pcmd;
@@ -10860,20 +11253,19 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
- icmd = &elsiocb->iocb;
- icmd->un.elsreq64.myID = 0;
- icmd->un.elsreq64.fl = 1;
-
- /*
- * SLI3 ports require a different context type value than SLI4.
- * Catch SLI3 ports here and override the prep.
- */
- if (phba->sli_rev == LPFC_SLI_REV3) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wqe = &elsiocb->wqe;
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ } else {
+ icmd = &elsiocb->iocb;
+ icmd->un.elsreq64.myID = 0;
+ icmd->un.elsreq64.fl = 1;
icmd->ulpCt_h = 1;
icmd->ulpCt_l = 0;
}
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
pcmd += sizeof(uint32_t); /* CSP Word 1 */
memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
@@ -10899,21 +11291,18 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_set_disctmo(vport);
phba->fc_stat.elsXmitFDISC++;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_fdisc;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue FDISC: did:x%x",
did, 0, 0);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
- lpfc_els_free_iocb(phba, elsiocb);
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp)
goto err_out;
- }
rc = lpfc_issue_fabric_iocb(phba, elsiocb);
if (rc == IOCB_ERROR) {
- lpfc_els_free_iocb(phba, elsiocb);
lpfc_nlp_put(ndlp);
goto err_out;
}
@@ -10922,6 +11311,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 0;
err_out:
+ lpfc_els_free_iocb(phba, elsiocb);
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0256 Issue FDISC: Cannot send IOCB\n");
@@ -10950,23 +11340,36 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ u32 ulp_status, ulp_word4, did, tmo;
+
+ ndlp = cmdiocb->ndlp;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+ tmo = get_wqe_tmo(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ did = get_job_els_rsp64_did(phba, rspiocb);
+ tmo = irsp->ulpTimeout;
+ }
- ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
- irsp = &rspiocb->iocb;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"LOGO npiv cmpl: status:x%x/x%x did:x%x",
- irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
+ ulp_status, ulp_word4, did);
/* NPIV LOGO completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2928 NPIV LOGO completes to NPort x%x "
"Data: x%x x%x x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes,
+ ndlp->nlp_DID, ulp_status, ulp_word4,
+ tmo, vport->num_disc_nodes,
kref_read(&ndlp->kref), ndlp->nlp_flag,
ndlp->fc4_xpt_flags);
- if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ if (ulp_status == IOSTAT_SUCCESS) {
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_NDISC_ACTIVE;
vport->fc_flag &= ~FC_FABRIC;
@@ -10974,10 +11377,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_can_disctmo(vport);
}
+ if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ /* Wake up lpfc_vport_delete if waiting...*/
+ if (ndlp->logo_waitq)
+ wake_up(ndlp->logo_waitq);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+ }
+
/* Safe to release resources now. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
}
/**
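
The new NLP_WAIT_FOR_LOGO handling pairs this completion with a sleeper elsewhere (the in-line comment points at lpfc_vport_delete). A hypothetical sketch of the waiting side; the timeout value and exact flow are assumptions for illustration, not taken from this patch:

```c
/* Hypothetical waiter: arm the flag, send the LOGO, then sleep until
 * lpfc_cmpl_els_npiv_logo() clears NLP_WAIT_FOR_LOGO and wakes us.
 * The 30s timeout is an assumption for illustration.
 */
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

spin_lock_irq(&ndlp->lock);
ndlp->logo_waitq = &waitq;
ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
spin_unlock_irq(&ndlp->lock);

if (!lpfc_issue_els_npiv_logo(vport, ndlp))
	wait_event_timeout(waitq,
			   !(ndlp->save_flags & NLP_WAIT_FOR_LOGO),
			   msecs_to_jiffies(30 * 1000));

ndlp->logo_waitq = NULL;
```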
@@ -10988,7 +11400,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* This routine issues a LOGO ELS command to an @ndlp off a @vport.
*
* Note that the ndlp reference count will be incremented by 1 for holding the
- * ndlp and the reference to ndlp will be stored into the context1 field of
+ * ndlp and the reference to ndlp will be stored into the ndlp field of
* the IOCB for the completion callback function to the LOGO ELS command.
*
* Return codes
@@ -11010,7 +11422,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!elsiocb)
return 1;
- pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ pcmd = (uint8_t *)elsiocb->cmd_dmabuf->virt;
*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
pcmd += sizeof(uint32_t);
@@ -11023,12 +11435,12 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"Issue LOGO npiv did:x%x flg:x%x",
ndlp->nlp_DID, ndlp->nlp_flag, 0);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo;
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_LOGO_SND;
spin_unlock_irq(&ndlp->lock);
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(phba, elsiocb);
goto err;
}
@@ -11093,7 +11505,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
struct lpfc_iocbq *iocb;
unsigned long iflags;
int ret;
- IOCB_t *cmd;
repeat:
iocb = NULL;
@@ -11108,24 +11519,23 @@ repeat:
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb) {
- iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
- iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
- iocb->iocb_flag |= LPFC_IO_FABRIC;
+ iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
+ iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->cmd_flag |= LPFC_IO_FABRIC;
lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
- "Fabric sched1: ste:x%x",
- iocb->vport->port_state, 0, 0);
+ "Fabric sched1: ste:x%x",
+ iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
- iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
- iocb->fabric_iocb_cmpl = NULL;
- iocb->iocb_flag &= ~LPFC_IO_FABRIC;
- cmd = &iocb->iocb;
- cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
- cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
- iocb->iocb_cmpl(phba, iocb, iocb);
+ iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
+ iocb->fabric_cmd_cmpl = NULL;
+ iocb->cmd_flag &= ~LPFC_IO_FABRIC;
+ set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
+ iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
+ iocb->cmd_cmpl(phba, iocb, iocb);
atomic_dec(&phba->fabric_iocb_count);
goto repeat;
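
The resume path shows the renamed interposition pattern: the caller's cmd_cmpl is parked in fabric_cmd_cmpl, the fabric completion is installed, and on an issue failure the original handler is restored and invoked with a synthesized LOCAL_REJECT/SLI_ABORTED status so it always runs exactly once. Condensed from the hunk:

```c
/* Interpose-and-restore, condensed from the hunk above. */
iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
iocb->cmd_flag |= LPFC_IO_FABRIC;

if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0) == IOCB_ERROR) {
	/* undo the interposition and fail the request locally */
	iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
	iocb->fabric_cmd_cmpl = NULL;
	iocb->cmd_flag &= ~LPFC_IO_FABRIC;
	set_job_ulpstatus(iocb, IOSTAT_LOCAL_REJECT);
	iocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
	iocb->cmd_cmpl(phba, iocb, iocb);	/* cmd doubles as rsp */
}
```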
@@ -11181,26 +11591,27 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
* @rspiocb: pointer to lpfc response iocb data structure.
*
* This routine is the callback function that is put to the fabric iocb's
- * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
- * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
+ * callback function pointer (iocb->cmd_cmpl). The original iocb's callback
+ * function pointer has been stored in iocb->fabric_cmd_cmpl. This callback
* function first restores and invokes the original iocb's callback function
* and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
* fabric bound iocb from the driver internal fabric iocb list onto the wire.
**/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_iocbq *rspiocb)
+ struct lpfc_iocbq *rspiocb)
{
struct ls_rjt stat;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
- BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
+ WARN_ON((cmdiocb->cmd_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
- switch (rspiocb->iocb.ulpStatus) {
+ switch (ulp_status) {
case IOSTAT_NPORT_RJT:
case IOSTAT_FABRIC_RJT:
- if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
+ if (ulp_word4 & RJT_UNAVAIL_TEMP)
lpfc_block_fabric_iocbs(phba);
- }
break;
case IOSTAT_NPORT_BSY:
@@ -11209,8 +11620,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
case IOSTAT_LS_RJT:
- stat.un.lsRjtError =
- be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
+ stat.un.ls_rjt_error_be =
+ cpu_to_be32(ulp_word4);
if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
lpfc_block_fabric_iocbs(phba);
@@ -11219,10 +11630,10 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
- cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
- cmdiocb->fabric_iocb_cmpl = NULL;
- cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
- cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
+ cmdiocb->cmd_cmpl = cmdiocb->fabric_cmd_cmpl;
+ cmdiocb->fabric_cmd_cmpl = NULL;
+ cmdiocb->cmd_flag &= ~LPFC_IO_FABRIC;
+ cmdiocb->cmd_cmpl(phba, cmdiocb, rspiocb);
atomic_dec(&phba->fabric_iocb_count);
if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
@@ -11273,20 +11684,20 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
atomic_inc(&phba->fabric_iocb_count);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ready) {
- iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
- iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
- iocb->iocb_flag |= LPFC_IO_FABRIC;
+ iocb->fabric_cmd_cmpl = iocb->cmd_cmpl;
+ iocb->cmd_cmpl = lpfc_cmpl_fabric_iocb;
+ iocb->cmd_flag |= LPFC_IO_FABRIC;
lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
- "Fabric sched2: ste:x%x",
- iocb->vport->port_state, 0, 0);
+ "Fabric sched2: ste:x%x",
+ iocb->vport->port_state, 0, 0);
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
if (ret == IOCB_ERROR) {
- iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
- iocb->fabric_iocb_cmpl = NULL;
- iocb->iocb_flag &= ~LPFC_IO_FABRIC;
+ iocb->cmd_cmpl = iocb->fabric_cmd_cmpl;
+ iocb->fabric_cmd_cmpl = NULL;
+ iocb->cmd_flag &= ~LPFC_IO_FABRIC;
atomic_dec(&phba->fabric_iocb_count);
}
} else {
@@ -11588,11 +11999,12 @@ lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *prsp = NULL;
struct lpfc_vmid_priority_range *vmid_range = NULL;
u32 *data;
- struct lpfc_dmabuf *dmabuf = cmdiocb->context2;
- IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_dmabuf *dmabuf = cmdiocb->cmd_dmabuf;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
u8 *pcmd, max_desc;
u32 len, i;
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
if (!prsp)
@@ -11606,10 +12018,10 @@ lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
data[0], data[1]);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
"6529 QFPA failed with status x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
@@ -11688,15 +12100,15 @@ int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
if (!elsiocb)
return -ENOMEM;
- pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
*((u32 *)(pcmd)) = ELS_CMD_QFPA;
pcmd += 4;
- elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_qfpa;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(vport->phba, elsiocb);
return -ENXIO;
}
@@ -11743,7 +12155,7 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
vmid_context->nlp = ndlp;
vmid_context->instantiated = instantiated;
elsiocb->vmid_tag.vmid_context = vmid_context;
- pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
+ pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
@@ -11776,10 +12188,10 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
}
inst_desc->word6 = cpu_to_be32(inst_desc->word6);
- elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem;
+ elsiocb->cmd_cmpl = lpfc_cmpl_els_uvem;
- elsiocb->context1 = lpfc_nlp_get(ndlp);
- if (!elsiocb->context1) {
+ elsiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!elsiocb->ndlp) {
lpfc_els_free_iocb(vport->phba, elsiocb);
goto out;
}
@@ -11805,11 +12217,12 @@ lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
struct lpfc_dmabuf *prsp = NULL;
struct lpfc_vmid_context *vmid_context =
icmdiocb->vmid_tag.vmid_context;
- struct lpfc_nodelist *ndlp = icmdiocb->context1;
+ struct lpfc_nodelist *ndlp = icmdiocb->ndlp;
u8 *pcmd;
u32 *data;
- IOCB_t *irsp = &rspiocb->iocb;
- struct lpfc_dmabuf *dmabuf = icmdiocb->context2;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ struct lpfc_dmabuf *dmabuf = icmdiocb->cmd_dmabuf;
struct lpfc_vmid *vmid;
vmid = vmid_context->vmp;
@@ -11826,10 +12239,10 @@ lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
"4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
goto out;
}
- if (irsp->ulpStatus) {
+ if (ulp_status) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
"4533 UVEM error status %x: %x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ ulp_status, ulp_word4);
goto out;
}
spin_lock(&phba->hbalock);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9fe6e5b386ce..d38ebd7281b9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport)
ndlp = rdata->pnode;
if (!rdata->pnode) {
- pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
- __func__, rport, rport->scsi_target_id);
+ pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+ __func__, rport, rport->scsi_target_id);
return -EINVAL;
}
@@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %d\n",
+ "load_flag x%x refcnt %d state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
- vport->load_flag, kref_read(&ndlp->kref));
+ vport->load_flag, kref_read(&ndlp->kref),
+ ndlp->nlp_state, ndlp->fc4_xpt_flags);
/* Don't schedule a worker thread event if the vport is going down.
* The teardown process cleans up the node via lpfc_drop_node.
@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+ /* clear the NLP_XPT_REGD if the node is not registered
+ * with nvme-fc
+ */
+ if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
/* Remove the node reference from remote_port_add now.
* The driver will not call remote_port_delete.
@@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
ndlp->rport = NULL;
spin_unlock_irqrestore(&ndlp->lock, iflags);
- /* We need to hold the node by incrementing the reference
- * count until this queued work is done
- */
- evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+ if (phba->worker_thread) {
+ /* We need to hold the node by incrementing the reference
+ * count until this queued work is done
+ */
+ evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (evtp->evt_arg1) {
+ evtp->evt = LPFC_EVT_DEV_LOSS;
+ list_add_tail(&evtp->evt_listp, &phba->work_list);
+ lpfc_worker_wake_up(phba);
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ } else {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ "3188 worker thread is stopped %s x%06x, "
+ " rport x%px flg x%x load_flag x%x refcnt "
+ "%d\n", __func__, ndlp->nlp_DID,
+ ndlp->rport, ndlp->nlp_flag,
+ vport->load_flag, kref_read(&ndlp->kref));
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ /* Node is in dev loss. No further transaction. */
+ ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RM);
+ }
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (evtp->evt_arg1) {
- evtp->evt = LPFC_EVT_DEV_LOSS;
- list_add_tail(&evtp->evt_listp, &phba->work_list);
- lpfc_worker_wake_up(phba);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
@@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0203 Devloss timeout on "
"WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "NPort x%06x Data: x%x x%x x%x\n",
+ "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
*name, *(name+1), *(name+2), *(name+3),
*(name+4), *(name+5), *(name+6), *(name+7),
ndlp->nlp_DID, ndlp->nlp_flag,
- ndlp->nlp_state, ndlp->nlp_rpi);
+ ndlp->nlp_state, ndlp->nlp_rpi,
+ kref_read(&ndlp->kref));
} else {
lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
"0204 Devloss timeout on "
@@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
int free_evt;
int fcf_inuse;
uint32_t nlp_did;
+ bool hba_pci_err;
spin_lock_irq(&phba->hbalock);
while (!list_empty(&phba->work_list)) {
list_remove_head((&phba->work_list), evtp, typeof(*evtp),
evt_listp);
spin_unlock_irq(&phba->hbalock);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
free_evt = 1;
switch (evtp->evt) {
case LPFC_EVT_ELS_RETRY:
ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
- lpfc_els_retry_delay_handler(ndlp);
- free_evt = 0; /* evt is part of ndlp */
+ if (!hba_pci_err) {
+ lpfc_els_retry_delay_handler(ndlp);
+ free_evt = 0; /* evt is part of ndlp */
+ }
/* decrement the node reference count held
* for this queued work
*/
@@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
break;
case LPFC_EVT_RECOVER_PORT:
ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
- lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
- free_evt = 0;
+ if (!hba_pci_err) {
+ lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+ free_evt = 0;
+ }
/* decrement the node reference count held for
* this queued work
*/
@@ -859,20 +890,30 @@ lpfc_work_done(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct lpfc_vport *vport;
int i;
+ bool hba_pci_err;
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
spin_lock_irq(&phba->hbalock);
ha_copy = phba->work_ha;
phba->work_ha = 0;
spin_unlock_irq(&phba->hbalock);
+ if (hba_pci_err)
+ ha_copy = 0;
/* First, try to post the next mailbox command to SLI4 device */
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
lpfc_sli4_post_async_mbox(phba);
- if (ha_copy & HA_ERATT)
+ if (ha_copy & HA_ERATT) {
/* Handle the error attention event */
lpfc_handle_eratt(phba);
+ if (phba->fw_dump_cmpl) {
+ complete(phba->fw_dump_cmpl);
+ phba->fw_dump_cmpl = NULL;
+ }
+ }
+
if (ha_copy & HA_MBATT)
lpfc_sli_handle_mb_event(phba);
@@ -880,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_latt(phba);
/* Handle VMID Events */
- if (lpfc_is_vmid_enabled(phba)) {
+ if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
if (phba->pport->work_port_events &
WORKER_CHECK_VMID_ISSUE_QFPA) {
lpfc_check_vmid_qfpa_issue(phba);
@@ -930,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;
spin_unlock_irq(&vport->work_port_lock);
+ if (hba_pci_err)
+ continue;
if (work_port_events & WORKER_DISC_TMO)
lpfc_disc_timeout_handler(vport);
if (work_port_events & WORKER_ELS_TMO)
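
Several worker-thread hunks gate hardware-touching work on the new HBA_PCI_ERR bit. Since bit_flags is manipulated with bitmap ops, the read side is a test_bit(); a minimal sketch of the gating idiom, assuming only the flag name from this patch:

```c
/* Gate hardware access while the PCI channel is in error recovery. */
bool hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);

if (hba_pci_err) {
	/* drop interrupt work and skip mailbox posting; queued node
	 * events are still drained so refcounts stay balanced
	 */
	ha_copy = 0;
}
```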
@@ -1140,6 +1183,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
@@ -1157,6 +1201,13 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
vport->fc_flag &= ~FC_DISC_DELAYED;
spin_unlock_irq(shost->host_lock);
del_timer_sync(&vport->delayed_disc_tmo);
+
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+ /* Assume success on link up */
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+ }
}
int
@@ -1167,15 +1218,20 @@ lpfc_linkdown(struct lpfc_hba *phba)
struct lpfc_vport **vports;
LPFC_MBOXQ_t *mb;
int i;
+ int offline;
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
/* Block all SCSI stack I/Os */
lpfc_scsi_dev_block(phba);
+ offline = pci_channel_offline(phba->pcidev);
phba->defer_flogi_acc_flag = false;
+ /* Clear external loopback plug detected flag */
+ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
+
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
spin_unlock_irq(&phba->hbalock);
@@ -1186,6 +1242,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->trunk_link.link1.state = 0;
phba->trunk_link.link2.state = 0;
phba->trunk_link.link3.state = 0;
+ phba->trunk_link.phy_lnk_speed =
+ LPFC_LINK_SPEED_UNKNOWN;
phba->sli4_hba.link_state.logical_speed =
LPFC_LINK_SPEED_UNKNOWN;
}
@@ -1213,7 +1271,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any SLI3 firmware default rpi's */
- if (phba->sli_rev > LPFC_SLI_REV3)
+ if (phba->sli_rev > LPFC_SLI_REV3 || offline)
goto skip_unreg_did;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1297,11 +1355,17 @@ lpfc_linkup_port(struct lpfc_vport *vport)
FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
- FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ if (phba->defer_flogi_acc_flag)
+ vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
+ else
+ vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI |
+ FC_ABORT_DISCOVERY | FC_RSCN_MODE |
+ FC_NLP_MORE | FC_RSCN_DISCOVERY);
vport->fc_flag |= FC_NDISC_ACTIVE;
vport->fc_ns_retry = 0;
spin_unlock_irq(shost->host_lock);
+ lpfc_setup_fdmi_mask(vport);
lpfc_linkup_cleanup_nodes(vport);
}
@@ -1333,9 +1397,8 @@ lpfc_linkup(struct lpfc_hba *phba)
phba->pport->rcv_flogi_cnt = 0;
spin_unlock_irq(shost->host_lock);
- /* reinitialize initial FLOGI flag */
- phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
- phba->defer_flogi_acc_flag = false;
+ /* reinitialize initial HBA flag */
+ phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
return 0;
}
@@ -1413,7 +1476,6 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
LPFC_MBOXQ_t *sparam_mb;
- struct lpfc_dmabuf *sparam_mp;
u16 status = pmb->u.mb.mbxStatus;
int rc;
@@ -1462,13 +1524,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- sparam_mp = (struct lpfc_dmabuf *)
- sparam_mb->ctx_buf;
- lpfc_mbuf_free(phba, sparam_mp->virt,
- sparam_mp->phys);
- kfree(sparam_mp);
- sparam_mb->ctx_buf = NULL;
- mempool_free(sparam_mb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
+ MBOX_THD_UNLOCKED);
goto sparam_out;
}
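
From here on, nearly every mailbox completion replaces the open-coded lpfc_mbuf_free()/kfree()/mempool_free() triple with lpfc_mbox_rsrc_cleanup(). The helper's body is not part of this excerpt; a plausible sketch consistent with the MBOX_THD_LOCKED/MBOX_THD_UNLOCKED call sites (signature and the bool parameter are assumptions):

```c
/* Assumed shape of the consolidated cleanup: release the DMA buffer
 * hung off ctx_buf, then return the mailbox to its mempool. The
 * locked variant is for callers already holding phba->hbalock.
 */
static void example_mbox_rsrc_cleanup(struct lpfc_hba *phba,
				      LPFC_MBOXQ_t *mbox, bool locked)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;

	if (mp) {
		if (locked)
			__lpfc_mbuf_free(phba, mp->virt, mp->phys);
		else
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mbox->ctx_buf = NULL;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
}
```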
@@ -2099,8 +2156,8 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
* This function makes an running random selection decision on FCF record to
* use through a sequence of @fcf_cnt eligible FCF records with equal
* probability. To perform integer manunipulation of random numbers with
- * size unit32_t, the lower 16 bits of the 32-bit random number returned
- * from prandom_u32() are taken as the random random number generated.
+ * size uint32_t, a 16-bit random number returned from get_random_u16() is
+ * taken as the random number generated.
*
* Returns true when outcome is for the newly read FCF record should be
* chosen; otherwise, return false when outcome is for keeping the previously
@@ -2112,7 +2169,7 @@ lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
uint32_t rand_num;
/* Get 16-bit uniform random number */
- rand_num = 0xFFFF & prandom_u32();
+ rand_num = get_random_u16();
/* Decision with probability 1/fcf_cnt */
if ((fcf_cnt * rand_num) < 0xFFFF)
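
The selection test itself is unchanged; only the entropy source moves from masking prandom_u32() to get_random_u16(). With rand_num uniform over [0, 0xFFFF], the product test accepts with probability roughly 1/fcf_cnt:

```c
/* Probability sketch: rand_num ~ U[0, 65535]; the product test
 * accepts when rand_num < 0xFFFF / fcf_cnt, i.e. with probability
 * ~1/fcf_cnt (slightly under, due to integer truncation).
 */
static bool example_select_new_fcf(u32 fcf_cnt)
{
	u32 rand_num = get_random_u16();

	return (fcf_cnt * rand_num) < 0xFFFF;
}
```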
@@ -2913,7 +2970,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t next_fcf_index, fcf_index;
uint16_t current_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, stop the roundrobin failover process */
@@ -3018,7 +3075,7 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
struct fcf_record *new_fcf_record;
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
int rc;
/* If link state is not up, no need to proceed */
@@ -3267,7 +3324,6 @@ lpfc_start_fdiscs(struct lpfc_hba *phba)
void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
- struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
struct lpfc_vport *vport = mboxq->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -3348,12 +3404,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
out_free_mem:
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (dmabuf) {
- lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
- kfree(dmabuf);
- }
- return;
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
}
static void
@@ -3398,9 +3449,7 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* Check if sending the FLOGI is being deferred to after we get
* up to date CSPs from MBX_READ_SPARAM.
@@ -3412,12 +3461,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
out:
- pmb->ctx_buf = NULL;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_issue_clear_la(phba, vport);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
}
static void
@@ -3427,7 +3472,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
struct Scsi_Host *shost;
int i;
- struct lpfc_dmabuf *mp;
int rc;
struct fcf_record *fcf_record;
uint32_t fc_flags = 0;
@@ -3555,10 +3599,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
- mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(sparam_mbox, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
goto out;
}
@@ -3727,18 +3768,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
}
phba->fc_eventTag = la->eventTag;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- if (bf_get(lpfc_mbx_read_top_mm, la))
- phba->sli.sli_flag |= LPFC_MENLO_MAINT;
- else
- phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
phba->link_events++;
- if ((attn_type == LPFC_ATT_LINK_UP) &&
- !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
+ if (attn_type == LPFC_ATT_LINK_UP) {
phba->fc_stat.LinkUp++;
if (phba->link_flag & LS_LOOPBACK_MODE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3752,21 +3783,22 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1303 Link Up Event x%x received "
- "Data: x%x x%x x%x x%x x%x x%x %d\n",
+ "Data: x%x x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
bf_get(lpfc_mbx_read_top_alpa_granted,
la),
bf_get(lpfc_mbx_read_top_link_spd, la),
phba->alpa_map[0],
- bf_get(lpfc_mbx_read_top_mm, la),
- bf_get(lpfc_mbx_read_top_fa, la),
- phba->wait_4_mlo_maint_flg);
+ bf_get(lpfc_mbx_read_top_fa, la));
}
lpfc_mbx_process_link_up(phba, la);
if (phba->cmf_active_mode != LPFC_CFG_OFF)
lpfc_cmf_signal_init(phba);
+ if (phba->lmt & LMT_64Gb)
+ lpfc_read_lds_params(phba);
+
} else if (attn_type == LPFC_ATT_LINK_DOWN ||
attn_type == LPFC_ATT_UNEXP_WWPN) {
phba->fc_stat.LinkDown++;
@@ -3780,64 +3812,28 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
else if (attn_type == LPFC_ATT_UNEXP_WWPN)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1313 Link Down Unexpected FA WWPN Event x%x "
- "received Data: x%x x%x x%x x%x x%x\n",
+ "received Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
- "Data: x%x x%x x%x x%x x%x\n",
+ "Data: x%x x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag,
- bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
lpfc_mbx_issue_link_down(phba);
}
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
- attn_type == LPFC_ATT_LINK_UP) {
- if (phba->link_state != LPFC_LINK_DOWN) {
- phba->fc_stat.LinkDown++;
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1312 Link Down Event x%x received "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- lpfc_mbx_issue_link_down(phba);
- } else
- lpfc_enable_la(phba);
-
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1310 Menlo Maint Mode Link up Event x%x rcvd "
- "Data: x%x x%x x%x\n",
- la->eventTag, phba->fc_eventTag,
- phba->pport->port_state, vport->fc_flag);
- /*
- * The cmnd that triggered this will be waiting for this
- * signal.
- */
- /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
- if (phba->wait_4_mlo_maint_flg) {
- phba->wait_4_mlo_maint_flg = 0;
- wake_up_interruptible(&phba->wait_4_mlo_m_q);
- }
- }
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- bf_get(lpfc_mbx_read_top_fa, la)) {
- if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
- lpfc_issue_clear_la(phba, vport);
+ bf_get(lpfc_mbx_read_top_fa, la))
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
"1311 fa %d\n",
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_cmpl_read_topology_free_mbuf:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
- return;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
@@ -3850,9 +3846,13 @@ void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
+ struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
+ /* The driver calls the state machine with the pmb pointer
+ * but wants to make sure a stale ctx_buf isn't acted on.
+ * The ctx_buf is restored later and cleaned up.
+ */
pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
@@ -3889,10 +3889,9 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
/* Call state machine */
lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
+ pmb->ctx_buf = mp;
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
/* decrement the node reference count held for this callback
* function.
*/
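
The reg_login completion now parks ctx_buf before calling the discovery state machine and restores it just before cleanup, so the state machine never acts on a stale buffer while lpfc_mbox_rsrc_cleanup() still finds it to free. The pattern, condensed:

```c
/* Park, run, restore, free - condensed from the hunk above. */
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

pmb->ctx_buf = NULL;		/* hide from the state machine */
lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
pmb->ctx_buf = mp;		/* restore for unified cleanup */
lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
```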
@@ -3928,7 +3927,6 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
/*
@@ -3958,7 +3956,6 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
return rc;
}
return 0;
@@ -4061,11 +4058,15 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
vport_buff = (uint8_t *) vport_info;
do {
- /* free dma buffer from previous round */
+			/* Each loop iteration frees the DMA buffer from
+			 * the previous round because the mbox is reused and
+			 * the dump routine is a single-use construct.
+ */
if (pmb->ctx_buf) {
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
+ pmb->ctx_buf = NULL;
}
if (lpfc_dump_static_vport(phba, pmb, offset))
goto out;
@@ -4150,16 +4151,8 @@ lpfc_create_static_vport(struct lpfc_hba *phba)
out:
kfree(vport_info);
- if (mbx_wait_rc != MBX_TIMEOUT) {
- if (pmb->ctx_buf) {
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mempool_free(pmb, phba->mbox_mem_pool);
- }
-
- return;
+ if (mbx_wait_rc != MBX_TIMEOUT)
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/*
@@ -4173,22 +4166,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct Scsi_Host *shost;
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
- pmb->ctx_buf = NULL;
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0258 Register Fabric login error: 0x%x\n",
mb->mbxStatus);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
/* FLOGI failed, use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -4230,9 +4217,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_do_scr_ns_plogi(phba, vport);
}
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* Drop the reference count from the mbox at the end after
* all the current reference to the ndlp have been done.
@@ -4326,12 +4311,10 @@ void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
int rc;
- pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
vport->gidft_inp = 0;
@@ -4345,9 +4328,7 @@ out:
* callback function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* If the node is not registered with the scsi or nvme
* transport, remove the fabric node. The failed reg_login
@@ -4417,8 +4398,11 @@ out:
rc = lpfc_issue_els_edc(vport, 0);
lpfc_printf_log(phba, KERN_INFO,
LOG_INIT | LOG_ELS | LOG_DISCOVERY,
- "4220 EDC issue error x%x, Data: x%x\n",
+ "4220 Issue EDC status x%x Data x%x\n",
rc, phba->cgn_init_reg_signal);
+ } else if (phba->lmt & LMT_64Gb) {
+ /* may send link fault capability descriptor */
+ lpfc_issue_els_edc(vport, 0);
} else {
lpfc_issue_els_rdf(vport, 0);
}
@@ -4436,10 +4420,7 @@ out:
* callback function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return;
}
@@ -4453,13 +4434,9 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
pmb->ctx_ndlp = NULL;
- pmb->ctx_buf = NULL;
-
if (mb->mbxStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0933 %s: Register FC login error: 0x%x\n",
@@ -4483,9 +4460,7 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
out:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
/* Drop the reference count from the mbox at the end after
* all the current reference to the ndlp have been done.
@@ -4708,6 +4683,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "0999 %s Not regd: ndlp x%px rport x%px DID "
+ "x%x FLG x%x XPT x%x\n",
+ __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+ ndlp->nlp_flag, ndlp->fc4_xpt_flags);
return;
}
@@ -4718,6 +4698,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
+ } else if (!ndlp->rport) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
+ " XPT x%x refcnt %d\n",
+ __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
}
if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
@@ -4813,22 +4800,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
new_state == NLP_STE_UNMAPPED_NODE)
lpfc_nlp_reg_node(vport, ndlp);
- if ((new_state == NLP_STE_MAPPED_NODE) &&
- (vport->stat_data_enabled)) {
- /*
- * A new target is discovered, if there is no buffer for
- * statistical data collection allocate buffer.
- */
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_KERNEL);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0286 lpfc_nlp_state_cleanup failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
/*
* If the node just added to Mapped list was an FCP target,
* but the remote port registration failed or assigned a target
@@ -5055,7 +5026,8 @@ lpfc_can_disctmo(struct lpfc_vport *vport)
vport->port_state, vport->fc_ns_retry, vport->fc_flag);
/* Turn off discovery timer if its running */
- if (vport->fc_flag & FC_DISC_TMO) {
+ if (vport->fc_flag & FC_DISC_TMO ||
+ timer_pending(&vport->fc_disctmo)) {
spin_lock_irqsave(shost->host_lock, iflags);
vport->fc_flag &= ~FC_DISC_TMO;
spin_unlock_irqrestore(shost->host_lock, iflags);
@@ -5084,24 +5056,30 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb,
struct lpfc_nodelist *ndlp)
{
- IOCB_t *icmd = &iocb->iocb;
- struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_vport *vport = ndlp->vport;
+ u8 ulp_command;
+ u16 ulp_context;
+ u32 remote_id;
if (iocb->vport != vport)
return 0;
+ ulp_command = get_job_cmnd(phba, iocb);
+ ulp_context = get_job_ulpcontext(phba, iocb);
+ remote_id = get_job_els_rsp64_did(phba, iocb);
+
if (pring->ringno == LPFC_ELS_RING) {
- switch (icmd->ulpCommand) {
+ switch (ulp_command) {
case CMD_GEN_REQUEST64_CR:
- if (iocb->context_un.ndlp == ndlp)
+ if (iocb->ndlp == ndlp)
return 1;
fallthrough;
case CMD_ELS_REQUEST64_CR:
- if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
+ if (remote_id == ndlp->nlp_DID)
return 1;
fallthrough;
case CMD_XMIT_ELS_RSP64_CX:
- if (iocb->context1 == (uint8_t *) ndlp)
+ if (iocb->ndlp == ndlp)
return 1;
}
} else if (pring->ringno == LPFC_FCP_RING) {
@@ -5110,9 +5088,8 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
(ndlp->nlp_flag & NLP_DELAY_TMO)) {
return 0;
}
- if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
+ if (ulp_context == ndlp->nlp_rpi)
return 1;
- }
}
return 0;
}
@@ -5212,7 +5189,6 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!ndlp)
return;
lpfc_issue_els_logo(vport, ndlp, 0);
- mempool_free(pmb, phba->mbox_mem_pool);
/* Check to see if there are any deferred events to process */
if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
@@ -5239,6 +5215,13 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
spin_unlock_irq(&ndlp->lock);
}
+
+ /* The node has an outstanding reference for the unreg. Now
+ * that the LOGO action and cleanup are finished, release
+ * resources.
+ */
+ lpfc_nlp_put(ndlp);
+ mempool_free(pmb, phba->mbox_mem_pool);
}
/*
@@ -5361,6 +5344,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
mempool_free(mbox, phba->mbox_mem_pool);
acc_plogi = 1;
+ lpfc_nlp_put(ndlp);
}
} else {
lpfc_printf_vlog(vport, KERN_INFO,
@@ -5507,7 +5491,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
- struct lpfc_dmabuf *mp;
/* Cleanup node for NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
@@ -5545,16 +5528,11 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
!(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
(ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
list_del(&mb->list);
- mempool_free(mb, phba->mbox_mem_pool);
- /* We shall not invoke the lpfc_nlp_put to decrement
- * the ndlp reference count as we are in the process
- * of lpfc_nlp_release.
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
+
+ /* Don't invoke lpfc_nlp_put. The driver is in
+ * lpfc_nlp_release context.
*/
}
}
@@ -6023,9 +6001,9 @@ static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- IOCB_t *icmd;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_sli_ring *pring;
+ u32 ulp_command;
pring = lpfc_phba_elsring(phba);
if (unlikely(!pring))
@@ -6036,12 +6014,13 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
*/
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->ndlp != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
- (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
list_move_tail(&iocb->list, &completions);
}
@@ -6049,12 +6028,13 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
/* Next check the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
- if (iocb->context1 != ndlp) {
+ if (iocb->ndlp != ndlp)
continue;
- }
- icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
- icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
+
+ ulp_command = get_job_cmnd(phba, iocb);
+
+ if (ulp_command == CMD_ELS_REQUEST64_CR ||
+ ulp_command == CMD_XMIT_ELS_RSP64_CX) {
lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
}
}
@@ -6085,12 +6065,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
}
}
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state. When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and transport notified that the node is gone.
+ * Return Code:
+ * none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+ struct lpfc_nodelist *ndlp, *next_ndlp;
+
+ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+ nlp_listp) {
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ }
+}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
lpfc_els_flush_rscn(vport);
lpfc_els_flush_cmd(vport);
lpfc_disc_flush_list(vport);
+ if (pci_channel_offline(vport->phba->pcidev))
+ lpfc_notify_xport_npr(vport);
}
/*****************************************************************************/
@@ -6304,8 +6306,9 @@ restart_disc:
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"0231 RSCN timeout Data: x%x "
- "x%x\n",
- vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
+ "x%x x%x x%x\n",
+ vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
+ vport->port_state, vport->gidft_inp);
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_cmd(vport);
@@ -6375,11 +6378,9 @@ void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
MAILBOX_t *mb = &pmb->u.mb;
- struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
struct lpfc_vport *vport = pmb->vport;
- pmb->ctx_buf = NULL;
pmb->ctx_ndlp = NULL;
if (phba->sli_rev < LPFC_SLI_REV4)
@@ -6410,10 +6411,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* function.
*/
lpfc_nlp_put(ndlp);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return;
}
@@ -6646,7 +6644,6 @@ lpfc_nlp_release(struct kref *kref)
ndlp->fc4_xpt_flags = 0;
/* free ndlp memory for final ndlp release */
- kfree(ndlp->lat_data);
if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
mempool_free(ndlp->active_rrqs_xri_bitmap,
ndlp->phba->active_rrq_pool);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 634f8fff7425..5c283936ff08 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -97,6 +97,18 @@ union CtCommandResponse {
#define FC4_FEATURE_INIT 0x2
#define FC4_FEATURE_NVME_DISC 0x4
+enum rft_word0 {
+ RFT_FCP_REG = (0x1 << 8),
+};
+
+enum rft_word1 {
+ RFT_NVME_REG = (0x1 << 8),
+};
+
+enum rft_word3 {
+ RFT_APP_SERV_REG = (0x1 << 0),
+};
+
struct lpfc_sli_ct_request {
/* Structure is in Big Endian format */
union CtRevisionId RevisionId;
@@ -131,25 +143,13 @@ struct lpfc_sli_ct_request {
uint8_t Fc4Type;
} gid_ff;
struct rft {
- uint32_t PortId; /* For RFT_ID requests */
-
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd0:16;
- uint32_t rsvd1:7;
- uint32_t fcpReg:1; /* Type 8 */
- uint32_t rsvd2:2;
- uint32_t ipReg:1; /* Type 5 */
- uint32_t rsvd3:5;
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint32_t rsvd0:16;
- uint32_t fcpReg:1; /* Type 8 */
- uint32_t rsvd1:7;
- uint32_t rsvd3:5;
- uint32_t ipReg:1; /* Type 5 */
- uint32_t rsvd2:2;
-#endif
-	uint32_t rsvd[7];
+	__be32 port_id;	/* For RFT_ID requests */
+ __be32 fcp_reg; /* rsvd 31:9, fcp_reg 8, rsvd 7:0 */
+ __be32 nvme_reg; /* rsvd 31:9, nvme_reg 8, rsvd 7:0 */
+ __be32 word2;
+ __be32 app_serv_reg; /* rsvd 31:1, app_serv_reg 0 */
+ __be32 word[4];
} rft;
struct rnn {
uint32_t PortId; /* For RNN_ID requests */
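A minimal sketch of how a caller would fill the reworked rft words with the enum masks above; the function name is illustrative and not part of this patch:

static void example_fill_rft(struct lpfc_sli_ct_request *ctreq, u32 did)
{
	ctreq->un.rft.port_id = cpu_to_be32(did);
	ctreq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);	/* FC-4 type 0x08 */
	ctreq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);	/* FC-4 type 0x28 */
}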
@@ -511,8 +511,6 @@ struct class_parms {
uint8_t word3Reserved2; /* Fc Word 3, bit 0: 7 */
};
-#define FAPWWN_KEY_VENDOR 0x42524344 /*valid vendor version fawwpn key*/
-
struct serv_parm { /* Structure is in Big Endian format */
struct csp cmn;
struct lpfc_name portName;
@@ -664,6 +662,7 @@ struct fc_vft_header {
struct ls_rjt { /* Structure is in Big Endian format */
union {
+ __be32 ls_rjt_error_be;
uint32_t lsRjtError;
struct {
uint8_t lsRjtRsvd0; /* FC Word 0, bit 24:31 */
@@ -704,6 +703,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
#define LSEXP_OUT_OF_RESOURCE 0x29
#define LSEXP_CANT_GIVE_DATA 0x2A
#define LSEXP_REQ_UNSUPPORTED 0x2C
+#define LSEXP_NO_RSRC_ASSIGN 0x52
uint8_t vendorUnique; /* FC Word 0, bit 0: 7 */
} b;
} un;
@@ -1442,30 +1442,56 @@ struct lpfc_vmid_gallapp_ident_list {
/* Definitions for HBA / Port attribute entries */
-/* Attribute Entry */
-struct lpfc_fdmi_attr_entry {
- union {
- uint32_t AttrInt;
- uint8_t AttrTypes[32];
- uint8_t AttrString[256];
- struct lpfc_name AttrWWN;
- } un;
+/* Attribute Entry Structures */
+
+struct lpfc_fdmi_attr_u32 {
+ __be16 type;
+ __be16 len;
+ __be32 value_u32;
};
-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
- /* Structure is in Big Endian format */
- uint32_t AttrType:16;
- uint32_t AttrLen:16;
- /* Marks start of Value (ATTRIBUTE_ENTRY) */
- struct lpfc_fdmi_attr_entry AttrValue;
-} __packed;
+struct lpfc_fdmi_attr_wwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by compiler
+ */
+ u8 name[8];
+};
+
+struct lpfc_fdmi_attr_fullwwn {
+ __be16 type;
+ __be16 len;
+
+ /* Keep as u8[8] instead of __be64 to avoid accidental zero padding
+ * by compiler
+ */
+ u8 nname[8];
+ u8 pname[8];
+};
+
+struct lpfc_fdmi_attr_fc4types {
+ __be16 type;
+ __be16 len;
+ u8 value_types[32];
+};
+
+struct lpfc_fdmi_attr_string {
+ __be16 type;
+ __be16 len;
+ char value_string[256];
+};
+
+/* Maximum FDMI attribute length is Type+Len (4 bytes) + 256 byte string */
+#define FDMI_MAX_ATTRLEN sizeof(struct lpfc_fdmi_attr_string)
/*
* HBA Attribute Block
*/
struct lpfc_fdmi_attr_block {
uint32_t EntryCnt; /* Number of HBA attribute entries */
- struct lpfc_fdmi_attr_entry Entry; /* Variable-length array */
+ /* Variable Length Attribute Entry TLV's follow */
};
/*
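With the jumbo union gone, each attribute is emitted as its own fixed-size TLV. A hedged sketch of packing a 32-bit attribute; the helper name is invented here:

static u16 example_fdmi_put_u32(void *attr, u16 attrtype, u32 val)
{
	struct lpfc_fdmi_attr_u32 *ae = attr;

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(sizeof(*ae));
	ae->value_u32 = cpu_to_be32(val);
	return sizeof(*ae);	/* bytes consumed in the attribute block */
}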
@@ -1729,7 +1755,6 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_HELIOS_SCSP 0xfd11
#define PCI_DEVICE_ID_HELIOS_DCSP 0xfd12
#define PCI_DEVICE_ID_ZEPHYR 0xfe00
-#define PCI_DEVICE_ID_HORNET 0xfe05
#define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11
#define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12
#define PCI_VENDOR_ID_SERVERENGINE 0x19a2
@@ -1737,6 +1762,28 @@ struct lpfc_fdmi_reg_portattr {
#define PCI_DEVICE_ID_TOMCAT 0x0714
#define PCI_DEVICE_ID_SKYHAWK 0x0724
#define PCI_DEVICE_ID_SKYHAWK_VF 0x072c
+#define PCI_VENDOR_ID_ATTO 0x117c
+#define PCI_DEVICE_ID_CLRY_16XE 0x0064
+#define PCI_DEVICE_ID_CLRY_161E 0x0063
+#define PCI_DEVICE_ID_CLRY_162E 0x0064
+#define PCI_DEVICE_ID_CLRY_164E 0x0065
+#define PCI_DEVICE_ID_CLRY_16XP 0x0094
+#define PCI_DEVICE_ID_CLRY_161P 0x00a0
+#define PCI_DEVICE_ID_CLRY_162P 0x0094
+#define PCI_DEVICE_ID_CLRY_164P 0x00a1
+#define PCI_DEVICE_ID_CLRY_32XE 0x0094
+#define PCI_DEVICE_ID_CLRY_321E 0x00a2
+#define PCI_DEVICE_ID_CLRY_322E 0x00a3
+#define PCI_DEVICE_ID_CLRY_324E 0x00ac
+#define PCI_DEVICE_ID_CLRY_32XP 0x00bb
+#define PCI_DEVICE_ID_CLRY_321P 0x00bc
+#define PCI_DEVICE_ID_CLRY_322P 0x00bd
+#define PCI_DEVICE_ID_CLRY_324P 0x00be
+#define PCI_DEVICE_ID_TLFC_2 0x0064
+#define PCI_DEVICE_ID_TLFC_2XX2 0x4064
+#define PCI_DEVICE_ID_TLFC_3 0x0094
+#define PCI_DEVICE_ID_TLFC_3162 0x40a6
+#define PCI_DEVICE_ID_TLFC_3322 0x40a7
#define JEDEC_ID_ADDRESS 0x0080001c
#define FIREFLY_JEDEC_ID 0x1ACC
@@ -1752,7 +1799,6 @@ struct lpfc_fdmi_reg_portattr {
#define ZEPHYR_JEDEC_ID 0x0577
#define VIPER_JEDEC_ID 0x4838
#define SATURN_JEDEC_ID 0x1004
-#define HORNET_JDEC_ID 0x2057706D
#define JEDEC_ID_MASK 0x0FFFF000
#define JEDEC_ID_SHIFT 12
@@ -2649,19 +2695,26 @@ typedef struct {
} READ_SPARM_VAR;
/* Structure for MB Command READ_STATUS (14) */
+enum read_status_word1 {
+ RD_ST_CC = 0x01,
+ RD_ST_XKB = 0x80,
+};
+
+enum read_status_word17 {
+ RD_ST_XMIT_XKB_MASK = 0x3fffff,
+};
+
+enum read_status_word18 {
+ RD_ST_RCV_XKB_MASK = 0x3fffff,
+};
typedef struct {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1:31;
- uint32_t clrCounters:1;
- uint16_t activeXriCnt;
- uint16_t activeRpiCnt;
-#else /* __LITTLE_ENDIAN_BITFIELD */
- uint32_t clrCounters:1;
- uint32_t rsvd1:31;
- uint16_t activeRpiCnt;
- uint16_t activeXriCnt;
-#endif
+ u8 clear_counters; /* rsvd 7:1, cc 0 */
+ u8 rsvd5;
+ u8 rsvd6;
+ u8 xkb; /* xkb 7, rsvd 6:0 */
+
+ u32 rsvd8;
uint32_t xmitByteCnt;
uint32_t rcvByteCnt;
@@ -2673,6 +2726,14 @@ typedef struct {
uint32_t totalRespExchanges;
uint32_t rcvPbsyCnt;
uint32_t rcvFbsyCnt;
+
+ u32 drop_frame_no_rq;
+ u32 empty_rq;
+ u32 drop_frame_no_xri;
+ u32 empty_xri;
+
+ u32 xmit_xkb; /* rsvd 31:22, xmit_xkb 21:0 */
+ u32 rcv_xkb; /* rsvd 31:22, rcv_xkb 21:0 */
} READ_STATUS_VAR;
/* Structure for MB Command READ_RPI (15) */
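The 22-bit KB counters in words 17/18 are valid only when the port sets the XKB bit in word 1; a short decode sketch using the masks above:

static u32 example_rd_st_xmit_kb(const READ_STATUS_VAR *rs)
{
	/* Extended KB counts are reported only when XKB is set */
	if (!(rs->xkb & RD_ST_XKB))
		return 0;
	return rs->xmit_xkb & RD_ST_XMIT_XKB_MASK;
}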
@@ -3038,7 +3099,6 @@ struct lpfc_mbx_read_top {
#define lpfc_mbx_read_top_topology_WORD word3
#define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */
#define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */
-#define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */
/* store the LILP AL_PA position map into */
struct ulp_bde64 lilpBde64;
#define LPFC_ALPA_MAP_SIZE 128
@@ -3675,19 +3735,26 @@ union sli_var {
};
typedef struct {
+ struct_group_tagged(MAILBOX_word0, bits,
+ union {
+ struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint16_t mbxStatus;
- uint8_t mbxCommand;
- uint8_t mbxReserved:6;
- uint8_t mbxHc:1;
- uint8_t mbxOwner:1; /* Low order bit first word */
+ uint16_t mbxStatus;
+ uint8_t mbxCommand;
+ uint8_t mbxReserved:6;
+ uint8_t mbxHc:1;
+ uint8_t mbxOwner:1; /* Low order bit first word */
#else /* __LITTLE_ENDIAN_BITFIELD */
- uint8_t mbxOwner:1; /* Low order bit first word */
- uint8_t mbxHc:1;
- uint8_t mbxReserved:6;
- uint8_t mbxCommand;
- uint16_t mbxStatus;
+ uint8_t mbxOwner:1; /* Low order bit first word */
+ uint8_t mbxHc:1;
+ uint8_t mbxReserved:6;
+ uint8_t mbxCommand;
+ uint16_t mbxStatus;
#endif
+ };
+ u32 word0;
+ };
+ );
MAILVARIANTS un;
union sli_var us;
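struct_group_tagged() leaves the bitfield layout intact while exposing the same storage as a whole u32 and as a bounded "bits" group, so word-sized copies no longer trip fortified memcpy(). A sketch of the equivalent views, assuming a MAILBOX_t pointer:

static void example_mbox_word0(MAILBOX_t *mb, struct MAILBOX_word0 *saved)
{
	u32 w0 = mb->word0;		/* whole-word view */
	u16 sts = mb->mbxStatus;	/* bitfield view of the same storage */

	memcpy(saved, &mb->bits, sizeof(*saved)); /* bounded group copy */
	(void)w0;
	(void)sts;
}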
@@ -3746,7 +3813,7 @@ typedef struct {
#define IOERR_ILLEGAL_COMMAND 0x06
#define IOERR_XCHG_DROPPED 0x07
#define IOERR_ILLEGAL_FIELD 0x08
-#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_RPI_SUSPENDED 0x09
#define IOERR_TOO_MANY_BUFFERS 0x0A
#define IOERR_RCV_BUFFER_WAITING 0x0B
#define IOERR_NO_CONNECTION 0x0C
@@ -4369,23 +4436,15 @@ lpfc_is_LC_HBA(unsigned short device)
}
/*
- * Determine if an IOCB failed because of a link event or firmware reset.
+ * Determine if a request failed because of a link event or firmware reset.
*/
-
static inline int
-lpfc_error_lost_link(IOCB_t *iocbp)
+lpfc_error_lost_link(u32 ulp_status, u32 ulp_word4)
{
- return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
- iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
- iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
+ return (ulp_status == IOSTAT_LOCAL_REJECT &&
+ (ulp_word4 == IOERR_SLI_ABORTED ||
+ ulp_word4 == IOERR_LINK_DOWN ||
+ ulp_word4 == IOERR_SLI_DOWN));
}
-#define MENLO_TRANSPORT_TYPE 0xfe
-#define MENLO_CONTEXT 0
-#define MENLO_PU 3
-#define MENLO_TIMEOUT 30
-#define SETVAR_MLOMNT 0x103107
-#define SETVAR_MLORST 0x103007
-
#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
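Because the helper now takes the raw completion words, the same test serves SLI-3 and SLI-4 paths. A sketch of a converted caller; the get_job_ulpstatus()/get_job_word4() extraction helpers are assumed from elsewhere in this series:

static void example_handle_cmpl(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdiocb)
{
	u32 ulp_status = get_job_ulpstatus(phba, cmdiocb);
	u32 ulp_word4 = get_job_word4(phba, cmdiocb);

	if (lpfc_error_lost_link(ulp_status, ulp_word4))
		return;		/* link is gone; do not retry */
}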
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6ec42991d2ab..5288fc69908a 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -60,6 +60,14 @@
((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
+#define get_wqe_reqtag(x) (((x)->wqe.words[9] >> 0) & 0xFFFF)
+#define get_wqe_tmo(x) (((x)->wqe.words[7] >> 24) & 0x00FF)
+
+#define get_job_ulpword(x, y) ((x)->iocb.un.ulpWord[y])
+
+#define set_job_ulpstatus(x, y) bf_set(lpfc_wcqe_c_status, &(x)->wcqe_cmpl, y)
+#define set_job_ulpword4(x, y) ((&(x)->wcqe_cmpl)->parameter = y)
+
struct dma_address {
uint32_t addr_lo;
uint32_t addr_hi;
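The set_job_* macros write the SLI-4 WCQE fields, letting abort paths synthesize a local completion status in one place. A sketch, assuming the iocbq carries a wcqe_cmpl per this series:

static void example_fake_abort_status(struct lpfc_iocbq *piocb)
{
	set_job_ulpstatus(piocb, IOSTAT_LOCAL_REJECT);
	set_job_ulpword4(piocb, IOERR_SLI_ABORTED);
}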
@@ -230,6 +238,34 @@ struct lpfc_sli_intf {
/* PORT_CAPABILITIES constants. */
#define LPFC_MAX_SUPPORTED_PAGES 8
+enum ulp_bde64_word3 {
+ ULP_BDE64_SIZE_MASK = 0xffffff,
+
+ ULP_BDE64_TYPE_SHIFT = 24,
+ ULP_BDE64_TYPE_MASK = (0xff << ULP_BDE64_TYPE_SHIFT),
+
+ /* BDE (Host_resident) */
+ ULP_BDE64_TYPE_BDE_64 = (0x00 << ULP_BDE64_TYPE_SHIFT),
+ /* Immediate Data BDE */
+ ULP_BDE64_TYPE_BDE_IMMED = (0x01 << ULP_BDE64_TYPE_SHIFT),
+ /* BDE (Port-resident) */
+ ULP_BDE64_TYPE_BDE_64P = (0x02 << ULP_BDE64_TYPE_SHIFT),
+ /* Input BDE (Host-resident) */
+ ULP_BDE64_TYPE_BDE_64I = (0x08 << ULP_BDE64_TYPE_SHIFT),
+ /* Input BDE (Port-resident) */
+ ULP_BDE64_TYPE_BDE_64IP = (0x0A << ULP_BDE64_TYPE_SHIFT),
+ /* BLP (Host-resident) */
+ ULP_BDE64_TYPE_BLP_64 = (0x40 << ULP_BDE64_TYPE_SHIFT),
+ /* BLP (Port-resident) */
+ ULP_BDE64_TYPE_BLP_64P = (0x42 << ULP_BDE64_TYPE_SHIFT),
+};
+
+struct ulp_bde64_le {
+ __le32 type_size; /* type 31:24, size 23:0 */
+ __le32 addr_low;
+ __le32 addr_high;
+};
+
struct ulp_bde64 {
union ULP_BDE_TUS {
uint32_t w;
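Unlike struct ulp_bde64, the new layout is explicitly little-endian with the type folded into the size word. A fill sketch; the helper name is illustrative:

static void example_fill_bde64_le(struct ulp_bde64_le *bde,
				  dma_addr_t pa, u32 len)
{
	bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 |
				     (len & ULP_BDE64_SIZE_MASK));
	bde->addr_low = cpu_to_le32(lower_32_bits(pa));
	bde->addr_high = cpu_to_le32(upper_32_bits(pa));
}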
@@ -702,6 +738,7 @@ struct lpfc_register {
#define lpfc_sliport_eqdelay_id_WORD word0
#define LPFC_SEC_TO_USEC 1000000
#define LPFC_SEC_TO_MSEC 1000
+#define LPFC_MSECS_TO_SECS(msecs) ((msecs) / 1000)
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
@@ -2857,6 +2894,9 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_extnts_inuse_SHIFT 31
#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
+#define lpfc_mbx_rd_conf_fawwpn_SHIFT 30
+#define lpfc_mbx_rd_conf_fawwpn_MASK 0x00000001
+#define lpfc_mbx_rd_conf_fawwpn_WORD word1
#define lpfc_mbx_rd_conf_wcs_SHIFT 28 /* warning signaling */
#define lpfc_mbx_rd_conf_wcs_MASK 0x00000001
#define lpfc_mbx_rd_conf_wcs_WORD word1
@@ -3444,9 +3484,10 @@ struct lpfc_sli4_parameters {
#define LPFC_SET_UE_RECOVERY 0x10
#define LPFC_SET_MDS_DIAGS 0x12
-#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_DUAL_DUMP 0x1e
+#define LPFC_SET_CGN_SIGNAL 0x1f
#define LPFC_SET_ENABLE_MI 0x21
+#define LPFC_SET_LD_SIGNAL 0x23
#define LPFC_SET_ENABLE_CMF 0x24
struct lpfc_mbx_set_feature {
struct mbox_header header;
@@ -3477,13 +3518,17 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_cmf_SHIFT 0
#define lpfc_mbx_set_feature_cmf_MASK 0x00000001
#define lpfc_mbx_set_feature_cmf_WORD word6
+#define lpfc_mbx_set_feature_lds_qry_SHIFT 0
+#define lpfc_mbx_set_feature_lds_qry_MASK 0x00000001
+#define lpfc_mbx_set_feature_lds_qry_WORD word6
+#define LPFC_QUERY_LDS_OP 1
#define lpfc_mbx_set_feature_mi_SHIFT 0
#define lpfc_mbx_set_feature_mi_MASK 0x0000ffff
#define lpfc_mbx_set_feature_mi_WORD word6
#define lpfc_mbx_set_feature_milunq_SHIFT 16
#define lpfc_mbx_set_feature_milunq_MASK 0x0000ffff
#define lpfc_mbx_set_feature_milunq_WORD word6
- uint32_t word7;
+ u32 word7;
#define lpfc_mbx_set_feature_UERP_SHIFT 0
#define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff
#define lpfc_mbx_set_feature_UERP_WORD word7
@@ -3497,6 +3542,8 @@ struct lpfc_mbx_set_feature {
#define lpfc_mbx_set_feature_CGN_acqe_freq_SHIFT 0
#define lpfc_mbx_set_feature_CGN_acqe_freq_MASK 0x000000ff
#define lpfc_mbx_set_feature_CGN_acqe_freq_WORD word8
+ u32 word9;
+ u32 word10;
};
@@ -4274,7 +4321,7 @@ struct lpfc_acqe_cgn_signal {
struct lpfc_acqe_sli {
uint32_t event_data1;
uint32_t event_data2;
- uint32_t reserved;
+ uint32_t event_data3;
uint32_t trailer;
#define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1
#define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2
@@ -4287,6 +4334,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF
#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10
#define LPFC_SLI_EVENT_TYPE_CGN_SIGNAL 0x11
+#define LPFC_SLI_EVENT_TYPE_RD_SIGNAL 0x12
};
/*
@@ -4437,12 +4485,8 @@ struct wqe_common {
#define wqe_cmd_type_MASK 0x0000000f
#define wqe_cmd_type_WORD word11
#define wqe_els_id_SHIFT 4
-#define wqe_els_id_MASK 0x00000003
+#define wqe_els_id_MASK 0x00000007
#define wqe_els_id_WORD word11
-#define LPFC_ELS_ID_FLOGI 3
-#define LPFC_ELS_ID_FDISC 2
-#define LPFC_ELS_ID_LOGO 1
-#define LPFC_ELS_ID_DEFAULT 0
#define wqe_irsp_SHIFT 4
#define wqe_irsp_MASK 0x00000001
#define wqe_irsp_WORD word11
@@ -4452,6 +4496,9 @@ struct wqe_common {
#define wqe_sup_SHIFT 6
#define wqe_sup_MASK 0x00000001
#define wqe_sup_WORD word11
+#define wqe_ffrq_SHIFT 6
+#define wqe_ffrq_MASK 0x00000001
+#define wqe_ffrq_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
@@ -4489,6 +4536,14 @@ struct lpfc_wqe_generic{
uint32_t payload[4];
};
+enum els_request64_wqe_word11 {
+ LPFC_ELS_ID_DEFAULT,
+ LPFC_ELS_ID_LOGO,
+ LPFC_ELS_ID_FDISC,
+ LPFC_ELS_ID_FLOGI,
+ LPFC_ELS_ID_PLOGI,
+};
+
struct els_request64_wqe {
struct ulp_bde64 bde;
uint32_t payload_len;
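The els_id mask widened from 2 to 3 bits so the new PLOGI encoding (4) fits, and the old defines became an enum with the same legacy values. Setting the field might look like this sketch, assuming a 128-byte WQE union:

static void example_set_els_id(union lpfc_wqe128 *wqe)
{
	bf_set(wqe_els_id, &wqe->els_req.wqe_com, LPFC_ELS_ID_PLOGI);
}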
@@ -4690,7 +4745,6 @@ struct create_xri_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
-#define INHIBIT_ABORT 1
#define T_REQUEST_TAG 3
#define T_XRI_TAG 1
@@ -4753,6 +4807,9 @@ struct cmf_sync_wqe {
#define cmf_sync_cqid_WORD word11
uint32_t read_bytes;
uint32_t word13;
+#define cmf_sync_period_SHIFT 16
+#define cmf_sync_period_MASK 0x0000ffff
+#define cmf_sync_period_WORD word13
uint32_t word14;
uint32_t word15;
};
@@ -5001,22 +5058,6 @@ struct lpfc_grp_hdr {
{ FPIN_CONGN_SEVERITY_ERROR, "Alarm" }, \
}
-/* EDC supports two descriptors. When allocated, it is the
- * size of this structure plus each supported descriptor.
- */
-struct lpfc_els_edc_req {
- struct fc_els_edc edc; /* hdr up to descriptors */
- struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
-};
-
-/* Minimum structure defines for the EDC response.
- * Balance is in buffer.
- */
-struct lpfc_els_edc_rsp {
- struct fc_els_edc_resp edc_rsp; /* hdr up to descriptors */
- struct fc_diag_cg_sig_desc cgn_desc; /* 1st descriptor */
-};
-
/* Used for logging FPIN messages */
#define LPFC_FPIN_WWPN_LINE_SZ 128
#define LPFC_FPIN_WWPN_LINE_CNT 6
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index 6a90e6e53d09..0b1616e93cf4 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -60,8 +60,6 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
PCI_ANY_ID, PCI_ANY_ID, },
- {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
- PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
@@ -124,5 +122,35 @@ const struct pci_device_id lpfc_id_table[] = {
PCI_ANY_ID, PCI_ANY_ID, },
{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
PCI_ANY_ID, PCI_ANY_ID, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_161P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_162P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_16XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_164P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XE,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324E, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_321P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_322P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_32XP,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_CLRY_324P, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_2XX2, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3162, },
+ {PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3,
+ PCI_VENDOR_ID_ATTO, PCI_DEVICE_ID_TLFC_3322, },
{ 0 }
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ba17a8f740a9..b535f1fd3010 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -324,8 +325,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
prog_id_word = pmboxq->u.mb.un.varWords[7];
/* Decode the Option rom version word to a readable string */
- if (prg->dist < 4)
- dist = dist_char[prg->dist];
+ dist = dist_char[prg->dist];
if ((prg->dist == 3) && (prg->num == 0))
snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
@@ -340,7 +340,6 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/**
* lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
- * cfg_soft_wwnn, cfg_soft_wwpn
* @vport: pointer to lpfc vport data structure.
*
*
@@ -350,22 +349,13 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
- uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
- u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];
-
- /* If the soft name exists then update it using the service params */
- if (vport->phba->cfg_soft_wwnn)
- u64_to_wwn(vport->phba->cfg_soft_wwnn,
- vport->fc_sparam.nodeName.u.wwn);
- if (vport->phba->cfg_soft_wwpn)
- u64_to_wwn(vport->phba->cfg_soft_wwpn,
- vport->fc_sparam.portName.u.wwn);
+ struct lpfc_hba *phba = vport->phba;
/*
* If the name is empty or there exists a soft name
* then copy the service params name, otherwise use the fc name
*/
- if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
+ if (vport->fc_nodename.u.wwn[0] == 0)
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
else
@@ -378,22 +368,35 @@ lpfc_update_vport_wwn(struct lpfc_vport *vport)
*/
if (vport->fc_portname.u.wwn[0] != 0 &&
memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name)))
+ sizeof(struct lpfc_name))) {
vport->vport_flag |= FAWWPN_PARAM_CHG;
- if (vport->fc_portname.u.wwn[0] == 0 ||
- vport->phba->cfg_soft_wwpn ||
- (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
- vport->vport_flag & FAWWPN_SET) {
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ vport->port_type == LPFC_PHYSICAL_PORT &&
+ phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
+ if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
+ phba->sli4_hba.fawwpn_flag &=
+ ~LPFC_FAWWPN_FABRIC;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_DISCOVERY | LOG_ELS,
+ "2701 FA-PWWN change WWPN from %llx to "
+ "%llx: vflag x%x fawwpn_flag x%x\n",
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64
+ (vport->fc_sparam.portName.u.wwn),
+ vport->vport_flag,
+ phba->sli4_hba.fawwpn_flag);
+ memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+ }
+ }
+
+ if (vport->fc_portname.u.wwn[0] == 0)
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
- sizeof(struct lpfc_name));
- vport->vport_flag &= ~FAWWPN_SET;
- if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
- vport->vport_flag |= FAWWPN_SET;
- }
+ sizeof(struct lpfc_name));
else
memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
- sizeof(struct lpfc_name));
+ sizeof(struct lpfc_name));
}
/**
@@ -452,15 +455,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
"READ_SPARM mbxStatus x%x\n",
mb->mbxCommand, mb->mbxStatus);
phba->link_state = LPFC_HBA_ERROR;
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return -EIO;
}
mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
+ * longer needed. Prevent unintended ctx_buf access as the mbox is
+ * reused.
+ */
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -695,8 +699,14 @@ lpfc_sli4_refresh_params(struct lpfc_hba *phba)
return rc;
}
mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
- phba->sli4_hba.pc_sli4_params.mi_ver =
+
+ /* Are we forcing MI off via module parameter? */
+ if (phba->cfg_enable_mi)
+ phba->sli4_hba.pc_sli4_params.mi_ver =
bf_get(cfg_mi_ver, mbx_sli4_parameters);
+ else
+ phba->sli4_hba.pc_sli4_params.mi_ver = 0;
+
phba->sli4_hba.pc_sli4_params.cmf =
bf_get(cfg_cmf, mbx_sli4_parameters);
phba->sli4_hba.pc_sli4_params.pls =
@@ -1027,7 +1037,7 @@ lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
spin_lock_irq(&pring->ring_lock);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
list_splice_init(&pring->txcmplq, &completions);
pring->txcmplq_cnt = 0;
spin_unlock_irq(&pring->ring_lock);
@@ -1652,7 +1662,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
if (phba->link_state == LPFC_HBA_ERROR &&
- phba->hba_flag & HBA_PCI_ERR) {
+ test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
spin_unlock_irq(&phba->hbalock);
return;
}
@@ -1995,6 +2005,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (pci_channel_offline(phba->pcidev)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3166 pci channel is offline\n");
+ lpfc_sli_flush_io_rings(phba);
return;
}
@@ -2104,7 +2115,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3143 Port Down: Firmware Update "
"Detected\n");
en_rn_msg = false;
@@ -2184,7 +2195,6 @@ lpfc_handle_latt(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
LPFC_MBOXQ_t *pmb;
volatile uint32_t control;
- struct lpfc_dmabuf *mp;
int rc = 0;
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -2193,23 +2203,17 @@ lpfc_handle_latt(struct lpfc_hba *phba)
goto lpfc_handle_latt_err_exit;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
rc = 2;
- goto lpfc_handle_latt_free_pmb;
- }
-
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- rc = 3;
- goto lpfc_handle_latt_free_mp;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ goto lpfc_handle_latt_err_exit;
}
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
-
psli->slistat.link_event++;
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = vport;
/* Block ELS IOCBs until we have processed this mbox command */
@@ -2230,11 +2234,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
lpfc_handle_latt_free_mbuf:
phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
-lpfc_handle_latt_free_mp:
- kfree(mp);
-lpfc_handle_latt_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_handle_latt_err_exit:
/* Enable Link attention interrupts */
spin_lock_irq(&phba->hbalock);
@@ -2257,6 +2257,101 @@ lpfc_handle_latt_err_exit:
return;
}
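The conversions in this file follow one pattern: lpfc_mbox_rsrc_prep() allocates and attaches the DMA buffer as ctx_buf, and lpfc_mbox_rsrc_cleanup() releases buffer and mailbox together. A condensed sketch of the pairing, with illustrative error values:

static int example_issue_read_topology(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;
	if (lpfc_mbox_rsrc_prep(phba, pmb)) {
		/* prep failed, so there is no ctx_buf to release */
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		/* one call frees both the dmabuf and the mailbox */
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		return -EIO;
	}
	return 0;
}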
+static void
+lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
+{
+ int i, j;
+
+ while (length > 0) {
+ /* Look for Serial Number */
+ if ((vpd[*pindex] == 'S') && (vpd[*pindex + 1] == 'N')) {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->SerialNumber[j++] = vpd[(*pindex)++];
+ if (j == 31)
+ break;
+ }
+ phba->SerialNumber[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '1')) {
+ phba->vpd_flag |= VPD_MODEL_DESC;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelDesc[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ModelDesc[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '2')) {
+ phba->vpd_flag |= VPD_MODEL_NAME;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ModelName[j++] = vpd[(*pindex)++];
+ if (j == 79)
+ break;
+ }
+ phba->ModelName[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '3')) {
+ phba->vpd_flag |= VPD_PROGRAM_TYPE;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3+i);
+ while (i--) {
+ phba->ProgramType[j++] = vpd[(*pindex)++];
+ if (j == 255)
+ break;
+ }
+ phba->ProgramType[j] = 0;
+ continue;
+ } else if ((vpd[*pindex] == 'V') && (vpd[*pindex + 1] == '4')) {
+ phba->vpd_flag |= VPD_PORT;
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ j = 0;
+ length -= (3 + i);
+ while (i--) {
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_GET)) {
+ j++;
+ (*pindex)++;
+ } else
+ phba->Port[j++] = vpd[(*pindex)++];
+ if (j == 19)
+ break;
+ }
+ if ((phba->sli_rev != LPFC_SLI_REV4) ||
+ (phba->sli4_hba.pport_name_sta ==
+ LPFC_SLI4_PPNAME_NON))
+ phba->Port[j] = 0;
+ continue;
+ } else {
+ *pindex += 2;
+ i = vpd[*pindex];
+ *pindex += 1;
+ *pindex += i;
+ length -= (3 + i);
+ }
+ }
+}
+
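Each record the helper consumes is a two-character keyword, a one-byte length, then that many data bytes. A fabricated fragment for illustration:

static const u8 example_vpd[] = {
	'S', 'N', 0x04, '1', 'A', '2', 'B',	/* serial number "1A2B" */
	'V', '2', 0x04, 'L', 'P', 'e', '0',	/* model name "LPe0" */
};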
/**
* lpfc_parse_vpd - Parse VPD (Vital Product Data)
* @phba: pointer to lpfc hba data structure.
@@ -2276,7 +2371,7 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
uint8_t lenlo, lenhi;
int Length;
- int i, j;
+ int i;
int finished = 0;
int index = 0;
@@ -2309,101 +2404,10 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
Length = ((((unsigned short)lenhi) << 8) + lenlo);
if (Length > len - index)
Length = len - index;
- while (Length > 0) {
- /* Look for Serial Number */
- if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->SerialNumber[j++] = vpd[index++];
- if (j == 31)
- break;
- }
- phba->SerialNumber[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
- phba->vpd_flag |= VPD_MODEL_DESC;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelDesc[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ModelDesc[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
- phba->vpd_flag |= VPD_MODEL_NAME;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ModelName[j++] = vpd[index++];
- if (j == 79)
- break;
- }
- phba->ModelName[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
- phba->vpd_flag |= VPD_PROGRAM_TYPE;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- phba->ProgramType[j++] = vpd[index++];
- if (j == 255)
- break;
- }
- phba->ProgramType[j] = 0;
- continue;
- }
- else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
- phba->vpd_flag |= VPD_PORT;
- index += 2;
- i = vpd[index];
- index += 1;
- j = 0;
- Length -= (3+i);
- while(i--) {
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_GET)) {
- j++;
- index++;
- } else
- phba->Port[j++] = vpd[index++];
- if (j == 19)
- break;
- }
- if ((phba->sli_rev != LPFC_SLI_REV4) ||
- (phba->sli4_hba.pport_name_sta ==
- LPFC_SLI4_PPNAME_NON))
- phba->Port[j] = 0;
- continue;
- }
- else {
- index += 2;
- i = vpd[index];
- index += 1;
- index += i;
- Length -= (3 + i);
- }
- }
- finished = 0;
- break;
+
+ lpfc_fill_vpd(phba, vpd, Length, &index);
+ finished = 0;
+ break;
case 0x78:
finished = 1;
break;
@@ -2417,6 +2421,90 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
}
/**
+ * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
+ * @phba: pointer to lpfc hba data structure.
+ * @mdp: pointer to the data structure to hold the derived model name.
+ * @descp: pointer to the data structure to hold the derived description.
+ *
+ * This routine retrieves HBA's description based on its registered PCI device
+ * ID. The @descp passed into this function points to an array of 256 chars. It
+ * shall be returned with the model name, maximum speed, and the host bus type.
+ * The @mdp passed into this function points to an array of 80 chars. When the
+ * function returns, the @mdp will be filled with the model name.
+ **/
+static void
+lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
+{
+ uint16_t sub_dev_id = phba->pcidev->subsystem_device;
+ char *model = "<Unknown>";
+ int tbolt = 0;
+
+ switch (sub_dev_id) {
+ case PCI_DEVICE_ID_CLRY_161E:
+ model = "161E";
+ break;
+ case PCI_DEVICE_ID_CLRY_162E:
+ model = "162E";
+ break;
+ case PCI_DEVICE_ID_CLRY_164E:
+ model = "164E";
+ break;
+ case PCI_DEVICE_ID_CLRY_161P:
+ model = "161P";
+ break;
+ case PCI_DEVICE_ID_CLRY_162P:
+ model = "162P";
+ break;
+ case PCI_DEVICE_ID_CLRY_164P:
+ model = "164P";
+ break;
+ case PCI_DEVICE_ID_CLRY_321E:
+ model = "321E";
+ break;
+ case PCI_DEVICE_ID_CLRY_322E:
+ model = "322E";
+ break;
+ case PCI_DEVICE_ID_CLRY_324E:
+ model = "324E";
+ break;
+ case PCI_DEVICE_ID_CLRY_321P:
+ model = "321P";
+ break;
+ case PCI_DEVICE_ID_CLRY_322P:
+ model = "322P";
+ break;
+ case PCI_DEVICE_ID_CLRY_324P:
+ model = "324P";
+ break;
+ case PCI_DEVICE_ID_TLFC_2XX2:
+ model = "2XX2";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3162:
+ model = "3162";
+ tbolt = 1;
+ break;
+ case PCI_DEVICE_ID_TLFC_3322:
+ model = "3322";
+ tbolt = 1;
+ break;
+ default:
+ model = "Unknown";
+ break;
+ }
+
+ if (mdp && mdp[0] == '\0')
+ snprintf(mdp, 79, "%s", model);
+
+ if (descp && descp[0] == '\0')
+ snprintf(descp, 255,
+ "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
+ (tbolt) ? "ThunderLink FC " : "Celerity FC-",
+ model,
+ phba->Port);
+}
+
+/**
* lpfc_get_hba_model_desc - Retrieve HBA device model name and description
* @phba: pointer to lpfc hba data structure.
* @mdp: pointer to the data structure to hold the derived model name.
@@ -2446,6 +2534,11 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
&& descp && descp[0] != '\0')
return;
+ if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
+ lpfc_get_atto_model_desc(phba, mdp, descp);
+ return;
+ }
+
if (phba->lmt & LMT_64Gb)
max_speed = 64;
else if (phba->lmt & LMT_32Gb)
@@ -2595,11 +2688,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
case PCI_DEVICE_ID_SAT_S:
m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
break;
- case PCI_DEVICE_ID_HORNET:
- m = (typeof(m)){"LP21000", "PCIe",
- "Obsolete, Unsupported FCoE Adapter"};
- GE = 1;
- break;
case PCI_DEVICE_ID_PROTEUS_VF:
m = (typeof(m)){"LPev12000", "PCIe IOV",
"Obsolete, Unsupported Fibre Channel Adapter"};
@@ -2688,7 +2776,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
}
/**
- * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
+ * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to a IOCB ring.
* @cnt: the number of IOCBs to be posted to the IOCB ring.
@@ -2700,7 +2788,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
* The number of IOCBs NOT able to be posted to the IOCB ring.
**/
int
-lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
+lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
IOCB_t *icmd;
struct lpfc_iocbq *iocb;
@@ -2806,7 +2894,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
/* Ring 0, ELS / CT buffers */
- lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+ lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
/* Ring 2 - FCP no buffers needed */
return 0;
@@ -2983,6 +3071,22 @@ lpfc_cleanup(struct lpfc_vport *vport)
NLP_EVT_DEVICE_RM);
}
+ /* This is a special case flush to return all
+ * IOs before entering this loop. There are
+ * two points in the code where a flush is
+ * avoided if the FC_UNLOADING flag is set.
+	 * One is in the multipool destroy
+	 * (this prevents a crash) and the other is
+	 * in the nvme abort handler (this also
+	 * prevents a crash). Both of these exceptions
+	 * are cases where the slot is still accessible.
+	 * The flush here is needed only when the pci
+	 * slot is offline.
+ */
+ if (vport->load_flag & FC_UNLOADING &&
+ pci_channel_offline(phba->pcidev))
+ lpfc_sli_flush_io_rings(vport->phba);
+
/* At this point, ALL ndlp's should be gone
* because of the previous NLP_EVT_DEVICE_RM.
* Lets wait for this to happen, if needed.
@@ -2995,7 +3099,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
list_for_each_entry_safe(ndlp, next_ndlp,
&vport->fc_nodes, nlp_listp) {
lpfc_printf_vlog(ndlp->vport, KERN_ERR,
- LOG_TRACE_EVENT,
+ LOG_DISCOVERY,
"0282 did:x%x ndlp:x%px "
"refcnt:%d xflags x%x nflag x%x\n",
ndlp->nlp_DID, (void *)ndlp,
@@ -3692,7 +3796,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
int i;
- int offline = 0;
+ int offline;
+ bool hba_pci_err;
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
@@ -3702,6 +3807,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
lpfc_linkdown(phba);
offline = pci_channel_offline(phba->pcidev);
+ hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Issue an unreg_login to all nodes on all vports */
vports = lpfc_create_vport_work_array(phba);
@@ -3725,11 +3831,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
spin_unlock_irq(&ndlp->lock);
- if (offline) {
+ if (offline || hba_pci_err) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~(NLP_UNREG_INP |
NLP_RPI_REGISTERED);
spin_unlock_irq(&ndlp->lock);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_sli_rpi_release(vports[i],
+ ndlp);
} else {
lpfc_unreg_rpi(vports[i], ndlp);
}
@@ -4260,8 +4369,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
qp = &phba->sli4_hba.hdwq[idx];
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
- lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
- lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+ lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
spin_lock(&qp->io_buf_list_put_lock);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
@@ -4305,9 +4413,10 @@ lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"6074 Current allocated XRI sgl count:%d, "
- "maximum XRI count:%d\n",
+ "maximum XRI count:%d els_xri_cnt:%d\n\n",
phba->sli4_hba.io_xri_cnt,
- phba->sli4_hba.io_xri_max);
+ phba->sli4_hba.io_xri_max,
+ els_xri_cnt);
cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
@@ -4446,12 +4555,11 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
}
pwqeq->sli4_lxritag = lxri;
pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
- pwqeq->context1 = lpfc_ncmd;
/* Initialize local short-hand pointers. */
lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
- lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+ lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
spin_lock_init(&lpfc_ncmd->buf_lock);
/* add the nvme buffer to a post list */
@@ -4460,7 +4568,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
}
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6114 Allocate %d out of %d requested new NVME "
- "buffers\n", bcnt, num_to_alloc);
+ "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
+ sizeof(*lpfc_ncmd));
+
/* post the list of nvme buffer sgls to port if available */
if (!list_empty(&post_nblist))
@@ -4507,6 +4617,17 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
return rol64(wwn, 32);
}
+static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
+{
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (phba->cfg_xpsgl && !phba->nvmet_support)
+			return LPFC_MAX_SG_TABLESIZE;
+		else
+			return phba->cfg_scsi_seg_cnt;
+	} else {
+		return phba->cfg_sg_seg_cnt;
+	}
+}
+
/**
* lpfc_vmid_res_alloc - Allocates resources for VMID
* @phba: pointer to lpfc hba data structure.
@@ -4609,42 +4730,26 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
/* Seed template for SCSI host registration */
if (dev == &phba->pcidev->dev) {
- template = &phba->port_template;
-
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
/* Seed physical port template */
- memcpy(template, &lpfc_template, sizeof(*template));
+ template = &lpfc_template;
if (use_no_reset_hba)
/* template is for a no reset SCSI Host */
template->eh_host_reset_handler = NULL;
- /* Template for all vports this physical port creates */
- memcpy(&phba->vport_template, &lpfc_template,
- sizeof(*template));
- phba->vport_template.shost_groups = lpfc_vport_groups;
- phba->vport_template.eh_bus_reset_handler = NULL;
- phba->vport_template.eh_host_reset_handler = NULL;
- phba->vport_template.vendor_id = 0;
-
- /* Initialize the host templates with updated value */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- template->sg_tablesize = phba->cfg_scsi_seg_cnt;
- phba->vport_template.sg_tablesize =
- phba->cfg_scsi_seg_cnt;
- } else {
- template->sg_tablesize = phba->cfg_sg_seg_cnt;
- phba->vport_template.sg_tablesize =
- phba->cfg_sg_seg_cnt;
- }
-
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
} else {
/* NVMET is for physical port only */
- memcpy(template, &lpfc_template_nvme,
- sizeof(*template));
+ template = &lpfc_template_nvme;
}
} else {
- template = &phba->vport_template;
+ /* Seed vport template */
+ template = &lpfc_vport_template;
+
+ /* Seed updated value of sg_tablesize */
+ template->sg_tablesize = lpfc_get_sg_tablesize(phba);
}
shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
@@ -4677,11 +4782,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-
- if (phba->cfg_xpsgl && !phba->nvmet_support)
- shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
- else
- shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
} else
/* SLI-3 has a limited number of hardware queues (3),
* thus there is only one for FCP processing.
@@ -4712,7 +4812,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
rc = lpfc_vmid_res_alloc(phba, vport);
if (rc)
- goto out;
+ goto out_put_shost;
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
@@ -4730,16 +4830,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
- goto out_put_shost;
+ goto out_free_vmid;
spin_lock_irq(&phba->port_list_lock);
list_add_tail(&vport->listentry, &phba->port_list);
spin_unlock_irq(&phba->port_list_lock);
return vport;
-out_put_shost:
+out_free_vmid:
kfree(vport->vmid);
bitmap_free(vport->vmid_priority_range);
+out_put_shost:
scsi_host_put(shost);
out:
return NULL;
@@ -5295,7 +5396,6 @@ static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
struct lpfc_acqe_link *acqe_link)
{
- struct lpfc_dmabuf *mp;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_mbx_read_top *la;
@@ -5312,18 +5412,13 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
"0395 The mboxq allocation failed\n");
return;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
+
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0396 The lpfc_dmabuf allocation failed\n");
+ "0396 mailbox allocation failed\n");
goto out_free_pmb;
}
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0397 The mbuf allocation failed\n");
- goto out_free_dmabuf;
- }
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
@@ -5335,7 +5430,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -5374,7 +5469,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
- goto out_free_dmabuf;
+ goto out_free_pmb;
return;
}
/*
@@ -5409,10 +5504,8 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
return;
-out_free_dmabuf:
- kfree(mp);
out_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
@@ -5470,38 +5563,12 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
void
lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
{
- struct rxtable_entry *entry;
- int cnt = 0, head, tail, last, start;
-
- head = atomic_read(&phba->rxtable_idx_head);
- tail = atomic_read(&phba->rxtable_idx_tail);
- if (!phba->rxtable || head == tail) {
- lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
- "4411 Rxtable is empty\n");
- return;
- }
- last = tail;
- start = head;
-
- /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
- while (start != last) {
- if (start)
- start--;
- else
- start = LPFC_MAX_RXMONITOR_ENTRY - 1;
- entry = &phba->rxtable[start];
+ if (!phba->rx_monitor) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
- "Lat %lld ASz %lld Info %02d BWUtil %d "
- "Int %d slot %d\n",
- cnt, entry->max_bytes_per_interval,
- entry->total_bytes, entry->rcv_bytes,
- entry->avg_io_latency, entry->avg_io_size,
- entry->cmf_info, entry->timer_utilization,
- entry->timer_interval, start);
- cnt++;
- if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
- return;
+ "4411 Rx Monitor Info is empty.\n");
+ } else {
+ lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
+ LPFC_MAX_RXMONITOR_DUMP);
}
}
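lpfc_rx_monitor_record() and lpfc_rx_monitor_report() are introduced elsewhere in this series; the assumed shape is a locked overwrite ring, roughly:

struct lpfc_rx_info_monitor {		/* assumed layout */
	struct rx_info_entry *ring;
	u32 head_idx, tail_idx;		/* oldest and next-insert slots */
	u32 entries;
	spinlock_t lock;
};

static void example_rx_monitor_record(struct lpfc_rx_info_monitor *rm,
				      struct rx_info_entry *entry)
{
	spin_lock(&rm->lock);
	memcpy(&rm->ring[rm->tail_idx], entry, sizeof(*entry));
	rm->tail_idx = (rm->tail_idx + 1) % rm->entries;
	if (rm->tail_idx == rm->head_idx)	/* full: drop the oldest */
		rm->head_idx = (rm->head_idx + 1) % rm->entries;
	spin_unlock(&rm->lock);
}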
@@ -5519,7 +5586,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
struct tm broken;
struct timespec64 cur_time;
u32 cnt;
- u16 value;
+ u32 value;
/* Make sure we have a congestion info buffer */
if (!phba->cgn_i)
@@ -5852,21 +5919,8 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
/* Use the frequency found in the last rcv'ed FPIN */
value = phba->cgn_fpin_frequency;
- if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN)
- cp->cgn_warn_freq = cpu_to_le16(value);
- if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM)
- cp->cgn_alarm_freq = cpu_to_le16(value);
-
- /* Frequency (in ms) Signal Warning/Signal Congestion Notifications
- * are received by the HBA
- */
- value = phba->cgn_sig_freq;
-
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
- phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
- cp->cgn_warn_freq = cpu_to_le16(value);
- if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
- cp->cgn_alarm_freq = cpu_to_le16(value);
+ cp->cgn_warn_freq = cpu_to_le16(value);
+ cp->cgn_alarm_freq = cpu_to_le16(value);
lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
LPFC_CGN_CRC32_SEED);
@@ -5921,11 +5975,10 @@ lpfc_cmf_timer(struct hrtimer *timer)
{
struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
cmf_timer);
- struct rxtable_entry *entry;
+ struct rx_info_entry entry;
uint32_t io_cnt;
- uint32_t head, tail;
uint32_t busy, max_read;
- uint64_t total, rcv, lat, mbpi, extra;
+ uint64_t total, rcv, lat, mbpi, extra, cnt;
int timer_interval = LPFC_CMF_INTERVAL;
uint32_t ms;
struct lpfc_cgn_stat *cgs;
@@ -5996,20 +6049,28 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Calculate any extra bytes needed to account for the
* timer accuracy. If we are less than LPFC_CMF_INTERVAL
- * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL
- * add an extra 2%. The goal is to equalize total with a
- * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1
+ * calculate the adjustment needed for total to reflect
+ * a full LPFC_CMF_INTERVAL.
*/
- if (ms == LPFC_CMF_INTERVAL)
- extra = div_u64(total, 50);
- else if (ms < LPFC_CMF_INTERVAL)
- extra = div_u64(total, 33);
+ if (ms && ms < LPFC_CMF_INTERVAL) {
+ cnt = div_u64(total, ms); /* bytes per ms */
+ cnt *= LPFC_CMF_INTERVAL; /* what total should be */
+
+ /* If the timeout is scheduled to be shorter,
+ * this value may skew the data, so cap it at mbpi.
+ */
+ if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
+ cnt = mbpi;
+
+ extra = cnt - total;
+ }
lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
} else {
/* For Monitor mode or link down we want mbpi
* to be the full link speed
*/
mbpi = phba->cmf_link_byte_count;
+ extra = 0;
}
phba->cmf_timer_cnt++;
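Worked example of the new scaling (interval length illustrative): if the timer fires at ms = 45 of a nominal 90 ms interval having counted total = 9000 bytes, then cnt = (9000 / 45) * 90 = 18000, so extra = 9000 and total + extra reflects a full interval's worth of traffic; when HBA_SHORT_CMF is set and cnt exceeds mbpi, cnt is first capped at mbpi.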
@@ -6035,39 +6096,30 @@ lpfc_cmf_timer(struct hrtimer *timer)
}
/* Save rxmonitor information for debug */
- if (phba->rxtable) {
- head = atomic_xchg(&phba->rxtable_idx_head,
- LPFC_RXMONITOR_TABLE_IN_USE);
- entry = &phba->rxtable[head];
- entry->total_bytes = total;
- entry->rcv_bytes = rcv;
- entry->cmf_busy = busy;
- entry->cmf_info = phba->cmf_active_info;
+ if (phba->rx_monitor) {
+ entry.total_bytes = total;
+ entry.cmf_bytes = total + extra;
+ entry.rcv_bytes = rcv;
+ entry.cmf_busy = busy;
+ entry.cmf_info = phba->cmf_active_info;
if (io_cnt) {
- entry->avg_io_latency = div_u64(lat, io_cnt);
- entry->avg_io_size = div_u64(rcv, io_cnt);
+ entry.avg_io_latency = div_u64(lat, io_cnt);
+ entry.avg_io_size = div_u64(rcv, io_cnt);
} else {
- entry->avg_io_latency = 0;
- entry->avg_io_size = 0;
+ entry.avg_io_latency = 0;
+ entry.avg_io_size = 0;
}
- entry->max_read_cnt = max_read;
- entry->io_cnt = io_cnt;
- entry->max_bytes_per_interval = mbpi;
+ entry.max_read_cnt = max_read;
+ entry.io_cnt = io_cnt;
+ entry.max_bytes_per_interval = mbpi;
if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
- entry->timer_utilization = phba->cmf_last_ts;
+ entry.timer_utilization = phba->cmf_last_ts;
else
- entry->timer_utilization = ms;
- entry->timer_interval = ms;
+ entry.timer_utilization = ms;
+ entry.timer_interval = ms;
phba->cmf_last_ts = 0;
- /* Increment rxtable index */
- head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
- tail = atomic_read(&phba->rxtable_idx_tail);
- if (head == tail) {
- tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
- atomic_set(&phba->rxtable_idx_tail, tail);
- }
- atomic_set(&phba->rxtable_idx_head, head);
+ lpfc_rx_monitor_record(phba->rx_monitor, &entry);
}
if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
@@ -6082,6 +6134,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Each minute save Fabric and Driver congestion information */
lpfc_cgn_save_evt_cnt(phba);
+ phba->hba_flag &= ~HBA_SHORT_CMF;
+
/* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
* minute, adjust our next timer interval, if needed, to ensure a
* 1 minute granularity when we get the next timer interrupt.
@@ -6092,6 +6146,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
jiffies);
if (timer_interval <= 0)
timer_interval = LPFC_CMF_INTERVAL;
+ else
+ phba->hba_flag |= HBA_SHORT_CMF;
/* If we adjust timer_interval, max_bytes_per_interval
* needs to be adjusted as well.
@@ -6133,6 +6189,7 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
{
uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
+ u8 cnt = 0;
phba->sli4_hba.link_state.speed =
lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
@@ -6151,26 +6208,36 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
phba->trunk_link.link1.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
phba->trunk_link.link2.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
+ cnt++;
}
if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
phba->trunk_link.link3.state =
bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
? LPFC_LINK_UP : LPFC_LINK_DOWN;
phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
+ cnt++;
}
+ if (cnt)
+ phba->trunk_link.phy_lnk_speed =
+ phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
+ else
+ phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
+
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2910 Async FC Trunking Event - Speed:%d\n"
"\tLogical speed:%d "
@@ -6210,7 +6277,6 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
- struct lpfc_dmabuf *mp;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_mbx_read_top *la;
@@ -6249,7 +6315,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
LPFC_FC_LA_TYPE_LINK_DOWN)
phba->sli4_hba.link_state.logical_speed = 0;
- else if (!phba->sli4_hba.conf_trunk)
+ else if (!phba->sli4_hba.conf_trunk)
phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
@@ -6270,18 +6336,12 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
"2897 The mboxq allocation failed\n");
return;
}
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (!mp) {
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2898 The lpfc_dmabuf allocation failed\n");
+ "2898 The mboxq prep failed\n");
goto out_free_pmb;
}
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp->virt) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2899 The mbuf allocation failed\n");
- goto out_free_dmabuf;
- }
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
@@ -6293,7 +6353,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
phba->sli.slistat.link_event++;
/* Create lpfc_handle_latt mailbox command from link ACQE */
- lpfc_read_topology(phba, pmb, mp);
+ lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = phba->pport;
@@ -6338,13 +6398,11 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED)
- goto out_free_dmabuf;
+ goto out_free_pmb;
return;
-out_free_dmabuf:
- kfree(mp);
out_free_pmb:
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
/**
@@ -6375,7 +6433,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
"2901 Async SLI event - Type:%d, Event Data: x%08x "
"x%08x x%08x x%08x\n", evt_type,
acqe_sli->event_data1, acqe_sli->event_data2,
- acqe_sli->reserved, acqe_sli->trailer);
+ acqe_sli->event_data3, acqe_sli->trailer);
port_name = phba->Port[0];
if (port_name == 0x00)
@@ -6404,7 +6462,7 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
temp_event_data.event_code = LPFC_NORMAL_TEMP;
temp_event_data.data = (uint32_t)acqe_sli->event_data1;
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
"3191 Normal Temperature:%d Celsius - Port Name %c\n",
acqe_sli->event_data1, port_name);
@@ -6536,12 +6594,15 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
/* Misconfigured WWN. Reports that the SLI Port is configured
	 * to use FA-WWN, but the attached device doesn't support it.
- * No driver action is required.
* Event Data1 - N.A, Event Data2 - N.A
+ * This event only happens on the physical port.
*/
- lpfc_log_msg(phba, KERN_WARNING, LOG_SLI,
- "2699 Misconfigured FA-WWN - Attached device does "
- "not support FA-WWN\n");
+ lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
+ "2699 Misconfigured FA-PWWN - Attached device "
+ "does not support FA-PWWN\n");
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
+ memset(phba->pport->fc_portname.u.wwn, 0,
+ sizeof(struct lpfc_name));
break;
case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
/* EEPROM failure. No driver action is required */
@@ -6566,9 +6627,6 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
/* Alarm overrides warning, so check that first */
if (cgn_signal->alarm_cnt) {
if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- /* Keep track of alarm cnt for cgn_info */
- atomic_add(cgn_signal->alarm_cnt,
- &phba->cgn_fabric_alarm_cnt);
/* Keep track of alarm cnt for CMF_SYNC_WQE */
atomic_add(cgn_signal->alarm_cnt,
&phba->cgn_sync_alarm_cnt);
@@ -6577,13 +6635,20 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
/* signal action needs to be taken */
if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
- /* Keep track of warning cnt for cgn_info */
- atomic_add(cnt, &phba->cgn_fabric_warn_cnt);
/* Keep track of warning cnt for CMF_SYNC_WQE */
atomic_add(cnt, &phba->cgn_sync_warn_cnt);
}
}
break;
+ case LPFC_SLI_EVENT_TYPE_RD_SIGNAL:
+ /* May be accompanied by a temperature event */
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI | LOG_LINK_EVENT | LOG_LDS_EVENT,
+ "2902 Remote Degrade Signaling: x%08x x%08x "
+ "x%08x\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ acqe_sli->event_data3);
+ break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3193 Unrecognized SLI event, type: 0x%x",
@@ -6997,6 +7062,12 @@ lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
spin_unlock_irq(&phba->hbalock);
}
+static const char * const lpfc_cmf_mode_to_str[] = {
+ "OFF",
+ "MANAGED",
+ "MONITOR",
+};
+
/**
* lpfc_cgn_params_parse - Process a FW cong parm change event
* @phba: pointer to lpfc hba data structure.
@@ -7016,6 +7087,7 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
{
struct lpfc_cgn_info *cp;
uint32_t crc, oldmode;
+ char acr_string[4] = {0};
/* Make sure the FW has encoded the correct magic number to
* validate the congestion parameter in FW memory.
@@ -7092,9 +7164,6 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
lpfc_issue_els_edc(phba->pport, 0);
break;
case LPFC_CFG_MONITOR:
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4661 Switch from MANAGED to "
- "`MONITOR mode\n");
phba->cmf_max_bytes_per_interval =
phba->cmf_link_byte_count;
@@ -7113,14 +7182,26 @@ lpfc_cgn_params_parse(struct lpfc_hba *phba,
lpfc_issue_els_edc(phba->pport, 0);
break;
case LPFC_CFG_MANAGED:
- lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
- "4662 Switch from MONITOR to "
- "MANAGED mode\n");
lpfc_cmf_signal_init(phba);
break;
}
break;
}
+ if (oldmode != LPFC_CFG_OFF ||
+ oldmode != phba->cgn_p.cgn_param_mode) {
+ if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
+ scnprintf(acr_string, sizeof(acr_string), "%u",
+ phba->cgn_p.cgn_param_level0);
+ else
+ scnprintf(acr_string, sizeof(acr_string), "NA");
+
+ dev_info(&phba->pcidev->dev, "%d: "
+ "4663 CMF: Mode %s acr %s\n",
+ phba->brd_no,
+ lpfc_cmf_mode_to_str
+ [phba->cgn_p.cgn_param_mode],
+ acr_string);
+ }
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
"4669 FW cgn parm buf wrong magic 0x%x "
@@ -7602,7 +7683,6 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->port_list);
INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
/* Initialize the wait queue head for the kernel thread */
init_waitqueue_head(&phba->work_waitq);
@@ -7686,13 +7766,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
if (rc)
return -ENODEV;
- if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
- phba->menlo_flag |= HBA_MENLO_SUPPORT;
- /* check for menlo minimum sg count */
- if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
- phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
- }
-
if (!phba->sli.sli3_ring)
phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
sizeof(struct lpfc_sli_ring),
@@ -7868,6 +7941,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* The lpfc_wq workqueue for deferred irq use */
phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+ if (!phba->wq)
+ return -ENOMEM;
/*
* Initialize timers used by driver
@@ -7971,7 +8046,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -7998,6 +8073,18 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = lpfc_sli4_read_config(phba);
if (unlikely(rc))
goto out_free_bsmbx;
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
+		/* Right now the link is down; if FA-PWWN is configured, the
+		 * firmware will try FLOGI before the driver gets a link up.
+		 * If it fails, the driver should get a MISCONFIGURED async
+		 * event which will clear this flag. The driver is notified
+		 * only on failure; on success there is no notification, so
+		 * assume success.
+ */
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+ }
+
rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
if (unlikely(rc))
goto out_free_bsmbx;
@@ -8221,8 +8308,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
&phba->pcidev->dev,
phba->cfg_sg_dma_buf_size,
i, 0);
- if (!phba->lpfc_sg_dma_buf_pool)
+ if (!phba->lpfc_sg_dma_buf_pool) {
+ rc = -ENOMEM;
goto out_free_bsmbx;
+ }
phba->lpfc_cmd_rsp_buf_pool =
dma_pool_create("lpfc_cmd_rsp_buf_pool",
@@ -8230,8 +8319,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp),
i, 0);
- if (!phba->lpfc_cmd_rsp_buf_pool)
+ if (!phba->lpfc_cmd_rsp_buf_pool) {
+ rc = -ENOMEM;
goto out_free_sg_dma_buf;
+ }
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -8387,6 +8478,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
@@ -8529,7 +8623,6 @@ static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
if (phba->wq) {
- flush_workqueue(phba->wq);
destroy_workqueue(phba->wq);
phba->wq = NULL;
}
@@ -8973,6 +9066,36 @@ lpfc_hba_free(struct lpfc_hba *phba)
}
/**
+ * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine will set up the initial FDMI attribute masks for
+ * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
+ * to get these attributes first before falling back; the attribute
+ * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
+ **/
+void
+lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+
+ vport->load_flag |= FC_ALLOW_FDMI;
+ if (phba->cfg_enable_SmartSAN ||
+ phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
+ /* Setup appropriate attribute masks */
+ vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
+ if (phba->cfg_enable_SmartSAN)
+ vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
+ else
+ vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+ "6077 Setup FDMI mask: hba x%x port x%x\n",
+ vport->fdmi_hba_mask, vport->fdmi_port_mask);
+}
+
+/**
* lpfc_create_shost - Create hba physical port with associated scsi host.
* @phba: pointer to lpfc hba data structure.
*
@@ -9015,21 +9138,12 @@ lpfc_create_shost(struct lpfc_hba *phba)
/* Put reference to SCSI host to driver's device private data */
pci_set_drvdata(phba->pcidev, shost);
+ lpfc_setup_fdmi_mask(vport);
+
/*
* At this point we are fully registered with PSA. In addition,
* any initial discovery should be completed.
*/
- vport->load_flag |= FC_ALLOW_FDMI;
- if (phba->cfg_enable_SmartSAN ||
- (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
-
- /* Setup appropriate attribute masks */
- vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
- if (phba->cfg_enable_SmartSAN)
- vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
- else
- vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
- }
return 0;
}
@@ -9802,7 +9916,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
struct lpfc_rsrc_desc_fcfcoe *desc;
char *pdesc_0;
uint16_t forced_link_speed;
- uint32_t if_type, qmin;
+ uint32_t if_type, qmin, fawwpn;
int length, i, rc = 0, rc2;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -9844,10 +9958,24 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
}
+ fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
+
+ if (fawwpn) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_INIT | LOG_DISCOVERY,
+ "2702 READ_CONFIG: FA-PWWN is "
+ "configured on\n");
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
+ } else {
+ /* Clear FW configured flag, preserve driver flag */
+ phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
+ }
+
phba->sli4_hba.conf_trunk =
bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
phba->sli4_hba.extents_in_use =
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
+
phba->sli4_hba.max_cfg_param.max_xri =
bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
/* Reduce resource usage in kdump environment */
@@ -12053,7 +12181,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
rc = pci_enable_msi(phba->pcidev);
if (!rc)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0462 PCI enable MSI mode success.\n");
+ "0012 PCI enable MSI mode success.\n");
else {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0471 PCI enable MSI mode failed (%d)\n", rc);
@@ -12285,7 +12413,7 @@ lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
for (i = 0; i < phba->cfg_irq_chann; i++) {
eqhdl = lpfc_get_eq_hdl(i);
- eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
+ eqhdl->irq = LPFC_IRQ_EMPTY;
eqhdl->phba = phba;
}
}
@@ -12658,7 +12786,7 @@ static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
{
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ if (phba->pport && (phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
__lpfc_cpuhp_remove(phba);
@@ -12709,7 +12837,7 @@ lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
cpumask_clear(&eqhdl->aff_mask);
cpumask_set_cpu(cpu, &eqhdl->aff_mask);
irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+ irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
}
/**
@@ -12922,9 +13050,17 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_DRIVER_HANDLER_NAME"%d", index);
eqhdl->idx = index;
- rc = request_irq(pci_irq_vector(phba->pcidev, index),
- &lpfc_sli4_hba_intr_handler, 0,
- name, eqhdl);
+ rc = pci_irq_vector(phba->pcidev, index);
+ if (rc < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0489 MSI-X fast-path (%d) "
+ "pci_irq_vec failed (%d)\n", index, rc);
+ goto cfg_fail_out;
+ }
+ eqhdl->irq = rc;
+
+ rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
+ name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -12932,8 +13068,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
goto cfg_fail_out;
}
- eqhdl->irq = pci_irq_vector(phba->pcidev, index);
-
if (aff_mask) {
/* If found a neighboring online cpu, set affinity */
if (cpu_select < nr_cpu_ids)
@@ -12998,7 +13132,6 @@ cfg_fail_out:
for (--index; index >= 0; index--) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
@@ -13051,7 +13184,14 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
}
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ rc = pci_irq_vector(phba->pcidev, 0);
+ if (rc < 0) {
+ pci_free_irq_vectors(phba->pcidev);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0496 MSI pci_irq_vec failed (%d)\n", rc);
+ return rc;
+ }
+ eqhdl->irq = rc;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
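These pci_irq_vector() hunks all make the same correction: pci_irq_vector() returns the Linux IRQ number on success but a negative errno on failure, so the result must be checked before it is stored in eqhdl->irq or handed to request_irq(). A minimal standalone sketch of the pattern follows; my_handler, my_setup_irq, and the "my_irq" name are placeholders, not lpfc code:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_handler(int irq, void *ctx)
{
	return IRQ_HANDLED;
}

static int my_setup_irq(struct pci_dev *pdev, void *ctx)
{
	int rc = pci_irq_vector(pdev, 0);	/* IRQ number or -errno */

	if (rc < 0) {
		pci_free_irq_vectors(pdev);
		return rc;			/* never store a negative IRQ */
	}
	return request_irq(rc, my_handler, 0, "my_irq", ctx);
}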
@@ -13078,8 +13218,8 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
* MSI-X -> MSI -> IRQ.
*
* Return codes
- * 0 - successful
- * other values - error
+ * Interrupt mode (2, 1, 0) - successful
+ * LPFC_INTR_ERROR - error
**/
static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
@@ -13124,7 +13264,14 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
intr_mode = 0;
eqhdl = lpfc_get_eq_hdl(0);
- eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
+ retval = pci_irq_vector(phba->pcidev, 0);
+ if (retval < 0) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0502 INTR pci_irq_vec failed (%d)\n",
+ retval);
+ return LPFC_INTR_ERROR;
+ }
+ eqhdl->irq = retval;
cpu = cpumask_first(cpu_present_mask);
lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
@@ -13159,7 +13306,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
for (index = 0; index < phba->cfg_irq_chann; index++) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
} else {
@@ -13351,8 +13497,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Abort all iocbs associated with the hba */
lpfc_sli_hba_iocb_abort(phba);
- /* Wait for completion of device XRI exchange busy */
- lpfc_sli4_xri_exchange_busy_wait(phba);
+ if (!pci_channel_offline(phba->pcidev))
+ /* Wait for completion of device XRI exchange busy */
+ lpfc_sli4_xri_exchange_busy_wait(phba);
/* per-phba callback de-registration for hotplug event */
if (phba->pport)
@@ -13371,15 +13518,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Disable FW logging to host memory */
lpfc_ras_stop_fwlog(phba);
- /* Unset the queues shared with the hardware then release all
- * allocated resources.
- */
- lpfc_sli4_queue_unset(phba);
- lpfc_sli4_queue_destroy(phba);
-
/* Reset SLI4 HBA FCoE function */
lpfc_pci_function_reset(phba);
+ /* release all queue allocated resources. */
+ lpfc_sli4_queue_destroy(phba);
+
/* Free RAS DMA memory */
if (phba->ras_fwlog.ras_enabled)
lpfc_sli4_ras_dma_free(phba);
@@ -13466,7 +13610,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
phba->cgn_evt_minute = 0;
phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
- memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
+ memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
cp->cgn_info_version = LPFC_CGN_INFO_V3;
@@ -13525,7 +13669,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba)
return;
cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
- memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
+ memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
ktime_get_real_ts64(&cmpl_time);
time64_to_tm(cmpl_time.tv_sec, 0, &broken);
@@ -14259,6 +14403,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
"2711 PCI channel permanent disable for failure\n");
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
+ lpfc_sli4_prep_dev_for_reset(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -14807,9 +14952,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
- /* Enable RAS FW log support */
- lpfc_sli4_ras_setup(phba);
-
timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
@@ -15054,24 +15196,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2826 PCI channel disable preparing for reset\n");
+ int offline = pci_channel_offline(phba->pcidev);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2826 PCI channel disable preparing for reset offline"
+ " %d\n", offline);
/* Block any management I/Os to the device */
lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
+ /* HBA_PCI_ERR was set in io_error_detect */
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
/* Flush all driver's outstanding I/Os as we are to reset */
lpfc_sli_flush_io_rings(phba);
+ lpfc_offline(phba);
/* stop all timers */
lpfc_stop_hba_timers(phba);
+ lpfc_sli4_queue_destroy(phba);
/* Disable interrupt and pci device */
lpfc_sli4_disable_intr(phba);
- lpfc_sli4_queue_destroy(phba);
pci_disable_device(phba->pcidev);
}
@@ -15120,6 +15266,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ bool hba_pci_err;
switch (state) {
case pci_channel_io_normal:
@@ -15127,17 +15274,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
lpfc_sli4_prep_dev_for_recover(phba);
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Fatal error, prepare for slot reset */
- lpfc_sli4_prep_dev_for_reset(phba);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
+ else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2832 Already handling PCI error "
+ "state: x%x\n", state);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
- phba->hba_flag |= HBA_PCI_ERR;
+ set_bit(HBA_PCI_ERR, &phba->bit_flags);
/* Permanent failure, prepare for device down */
lpfc_sli4_prep_dev_for_perm_failure(phba);
return PCI_ERS_RESULT_DISCONNECT;
default:
- phba->hba_flag |= HBA_PCI_ERR;
+ hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ lpfc_sli4_prep_dev_for_reset(phba);
/* Unknown state, prepare and request slot reset */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2825 Unknown PCI error state: x%x\n", state);
@@ -15171,17 +15325,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
struct lpfc_sli *psli = &phba->sli;
uint32_t intr_mode;
+ bool hba_pci_err;
dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
if (pci_enable_device_mem(pdev)) {
printk(KERN_ERR "lpfc: Cannot re-enable "
- "PCI device after reset.\n");
+ "PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_restore_state(pdev);
- phba->hba_flag &= ~HBA_PCI_ERR;
+ hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
+ if (!hba_pci_err)
+ dev_info(&pdev->dev,
+ "hba_pci_err was not set, recovering slot reset.\n");
/*
* As the new kernel behavior of pci_restore_state() API call clears
* device saved_state flag, need to save the restored state again.
@@ -15195,6 +15353,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
+ /* Init cpu_map array */
+ lpfc_cpu_map_array_init(phba);
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -15236,8 +15396,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
*/
if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
/* Perform device reset */
- lpfc_offline_prep(phba, LPFC_MBX_WAIT);
- lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
/* Bring the device back online */
lpfc_online(phba);
@@ -15659,34 +15817,7 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
unsigned int temp_idx;
int i;
int j = 0;
- unsigned long rem_nsec, iflags;
- bool log_verbose = false;
- struct lpfc_vport *port_iterator;
-
- /* Don't dump messages if we explicitly set log_verbose for the
- * physical port or any vport.
- */
- if (phba->cfg_log_verbose)
- return;
-
- spin_lock_irqsave(&phba->port_list_lock, iflags);
- list_for_each_entry(port_iterator, &phba->port_list, listentry) {
- if (port_iterator->load_flag & FC_UNLOADING)
- continue;
- if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- if (port_iterator->cfg_log_verbose)
- log_verbose = true;
-
- scsi_host_put(lpfc_shost_from_vport(port_iterator));
-
- if (log_verbose) {
- spin_unlock_irqrestore(&phba->port_list_lock,
- iflags);
- return;
- }
- }
- }
- spin_unlock_irqrestore(&phba->port_list_lock, iflags);
+ unsigned long rem_nsec;
if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
return;
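The PCI error-handling hunks above replace the plain flag update (phba->hba_flag |= HBA_PCI_ERR) with test_and_set_bit()/test_and_clear_bit() on phba->bit_flags, so that only the first of several racing AER callbacks runs the reset preparation. A minimal sketch of that one-shot pattern, with hypothetical names (err_bits, teardown_once, the two handlers):

#include <linux/bitops.h>
#include <linux/printk.h>

#define MY_PCI_ERR_BIT	0

static unsigned long err_bits;

static void teardown_once(void)
{
	/* hypothetical stand-in for lpfc_sli4_prep_dev_for_reset() */
}

static void on_pci_error(void)
{
	/* test_and_set_bit() atomically returns the old bit value, so
	 * exactly one caller observes 0 and performs the teardown. */
	if (!test_and_set_bit(MY_PCI_ERR_BIT, &err_bits))
		teardown_once();
}

static void on_slot_reset(void)
{
	/* Clearing on the recovery path re-arms the one-shot. */
	if (!test_and_clear_bit(MY_PCI_ERR_BIT, &err_bits))
		pr_info("error bit was not set; recovering anyway\n");
}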
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 7d480c798794..b39cefcd8703 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -35,7 +35,7 @@
#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */
#define LOG_LIBDFC 0x00002000 /* Libdfc events */
#define LOG_VPORT 0x00004000 /* NPIV events */
-#define LOG_SECURITY 0x00008000 /* Security events */
+#define LOG_LDS_EVENT 0x00008000 /* Link Degrade Signaling events */
#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
@@ -73,7 +73,7 @@ do { \
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
do { \
{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) { \
- if ((mask) & LOG_TRACE_EVENT) \
+ if ((mask) & LOG_TRACE_EVENT && !(vport)->cfg_log_verbose) \
lpfc_dmp_dbg((vport)->phba); \
dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
fmt, (vport)->phba->brd_no, vport->vpi, ##arg); \
@@ -89,11 +89,11 @@ do { \
(phba)->pport->cfg_log_verbose : \
(phba)->cfg_log_verbose; \
if (((mask) & log_verbose) || (level[1] <= '3')) { \
- if ((mask) & LOG_TRACE_EVENT) \
+ if ((mask) & LOG_TRACE_EVENT && !log_verbose) \
lpfc_dmp_dbg(phba); \
dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
fmt, phba->brd_no, ##arg); \
- } else if (!(phba)->cfg_log_verbose)\
+ } else if (!log_verbose)\
lpfc_dbg_print(phba, "%d:" fmt, phba->brd_no, ##arg); \
} \
} while (0)
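The macro changes above gate the trace-buffer replay on !log_verbose: with verbose logging enabled every message is already printed live, so dumping the internal ring on a LOG_TRACE_EVENT would only duplicate output. A condensed sketch of the scheme (the severity override on level[1] is omitted for brevity, and dbg_ring_dump()/dbg_ring_store() are hypothetical stand-ins for lpfc_dmp_dbg()/lpfc_dbg_print()):

#define MY_TRACE_EVENT	0x1000

#define my_log(dev, mask, verbose, fmt, ...)				\
do {									\
	if ((mask) & (verbose)) {					\
		/* verbose: printed live, no replay needed */		\
		dev_info(dev, fmt, ##__VA_ARGS__);			\
	} else if ((mask) & MY_TRACE_EVENT) {				\
		/* quiet + trace event: replay the ring, then print */	\
		dbg_ring_dump(dev);					\
		dev_info(dev, fmt, ##__VA_ARGS__);			\
	} else {							\
		/* quiet: only stash the message in the ring */		\
		dbg_ring_store(dev, fmt, ##__VA_ARGS__);		\
	}								\
} while (0)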
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 6c754ee96bee..9858b1743769 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -44,6 +44,80 @@
#include "lpfc_compat.h"
/**
+ * lpfc_mbox_rsrc_prep - Prepare a mailbox with DMA buffer memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ *
+ * A mailbox command consists of the pool memory for the command, @mbox, and
+ * one or more DMA buffers for the data transfer. This routine provides
+ * a standard framework for allocating the DMA buffer and assigning it to
+ * @mbox. Callers should clean up the mbox with a call to
+ * lpfc_mbox_rsrc_cleanup.
+ *
+ * The lpfc_mbuf_alloc routine acquires the hbalock, so the caller is
+ * responsible for ensuring the hbalock is released. Also note that the
+ * driver design is a single dmabuf/mbuf per mbox in the ctx_buf.
+ *
+ **/
+int
+lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+ struct lpfc_dmabuf *mp;
+
+ mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+ if (!mp)
+ return -ENOMEM;
+
+ mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+ if (!mp->virt) {
+ kfree(mp);
+ return -ENOMEM;
+ }
+
+ memset(mp->virt, 0, LPFC_BPL_SIZE);
+
+ /* Initialization only. Driver does not use a list of dmabufs. */
+ INIT_LIST_HEAD(&mp->list);
+ mbox->ctx_buf = mp;
+ return 0;
+}
+
+/**
+ * lpfc_mbox_rsrc_cleanup - Free the mailbox DMA buffer and virtual memory.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to the driver internal queue element for mailbox command.
+ * @locked: MBOX_THD_LOCKED if the hbalock is held, MBOX_THD_UNLOCKED if not.
+ *
+ * A mailbox command consists of the pool memory for the command, @mbox, and
+ * possibly a DMA buffer for the data transfer. This routine provides
+ * a standard framework for releasing any DMA buffers, freeing all of the
+ * mailbox's memory resources, and releasing @mbox back to the @phba pool.
+ * Callers should use this routine to clean up all mailboxes prepped with
+ * lpfc_mbox_rsrc_prep.
+ *
+ **/
+void
+lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
+ enum lpfc_mbox_ctx locked)
+{
+ struct lpfc_dmabuf *mp;
+
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ mbox->ctx_buf = NULL;
+
+ /* Release the generic BPL buffer memory. */
+ if (mp) {
+ if (locked == MBOX_THD_LOCKED)
+ __lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ else
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+
+ mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
* lpfc_dump_static_vport - Dump HBA's static vport information.
* @phba: pointer to lpfc hba data structure.
* @pmb: pointer to the driver internal queue element for mailbox command.
@@ -61,6 +135,7 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
{
MAILBOX_t *mb;
struct lpfc_dmabuf *mp;
+ int rc;
mb = &pmb->u.mb;
@@ -79,22 +154,15 @@ lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
return 0;
}
- /* For SLI4 HBAs driver need to allocate memory */
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
- "2605 lpfc_dump_static_vport: memory"
- " allocation failed\n");
+ "2605 %s: memory allocation failed\n",
+ __func__);
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
- /* save address for completion */
- pmb->ctx_buf = (uint8_t *)mp;
+
+ mp = pmb->ctx_buf;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
@@ -429,7 +497,7 @@ lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
/*
- * SLI-3, Message Signaled Interrupt Fearure.
+ * SLI-3, Message Signaled Interrupt Feature.
*/
/* Multi-message attention configuration */
@@ -606,26 +674,21 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
struct lpfc_dmabuf *mp;
MAILBOX_t *mb;
+ int rc;
- mb = &pmb->u.mb;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
- mb->mbxOwner = OWN_HOST;
-
/* Get a buffer to hold the HBAs Service Parameters */
-
- mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
- mb->mbxCommand = MBX_READ_SPARM64;
- /* READ_SPARAM: no buffers */
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"0301 READ_SPARAM: no buffers\n");
- return (1);
+ return 1;
}
- INIT_LIST_HEAD(&mp->list);
+
+ mp = pmb->ctx_buf;
+ mb = &pmb->u.mb;
+ mb->mbxOwner = OWN_HOST;
mb->mbxCommand = MBX_READ_SPARM64;
mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -633,9 +696,6 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
if (phba->sli_rev >= LPFC_SLI_REV3)
mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
- /* save address for completion */
- pmb->ctx_buf = mp;
-
return (0);
}
@@ -756,6 +816,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
MAILBOX_t *mb = &pmb->u.mb;
uint8_t *sparam;
struct lpfc_dmabuf *mp;
+ int rc;
memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -766,12 +827,10 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
mb->un.varRegLogin.did = did;
mb->mbxOwner = OWN_HOST;
+
/* Get a buffer to hold NPorts Service Parameters */
- mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, pmb);
+ if (rc) {
mb->mbxCommand = MBX_REG_LOGIN64;
/* REG_LOGIN: no buffers */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
@@ -779,15 +838,13 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
"rpi x%x\n", vpi, did, rpi);
return 1;
}
- INIT_LIST_HEAD(&mp->list);
- sparam = mp->virt;
/* Copy param's into a new buffer */
+ mp = pmb->ctx_buf;
+ sparam = mp->virt;
memcpy(sparam, param, sizeof (struct serv_parm));
- /* save address for completion */
- pmb->ctx_buf = (uint8_t *)mp;
-
+ /* Finish initializing the mailbox. */
mb->mbxCommand = MBX_REG_LOGIN64;
mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
@@ -1723,7 +1780,9 @@ lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
* @phba: pointer to lpfc hba data structure.
* @mbox: pointer to lpfc mbox command.
*
- * This routine frees SLI4 specific mailbox command for sending IOCTL command.
+ * This routine cleans up and releases an SLI4 mailbox command that was
+ * configured using lpfc_sli4_config. It accounts for the embedded and
+ * non-embedded config types.
**/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
@@ -2277,33 +2336,24 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_dmabuf *mp = NULL;
MAILBOX_t *mb;
+ int rc;
memset(mbox, 0, sizeof(*mbox));
mb = &mbox->u.mb;
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
-
- if (!mp || !mp->virt) {
- kfree(mp);
- /* dump config region 23 failed to allocate memory */
+ rc = lpfc_mbox_rsrc_prep(phba, mbox);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "2569 lpfc dump config region 23: memory"
- " allocation failed\n");
+ "2569 %s: memory allocation failed\n",
+ __func__);
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
-
- /* save address for completion */
- mbox->ctx_buf = (uint8_t *)mp;
-
mb->mbxCommand = MBX_DUMP_MEMORY;
mb->un.varDmp.type = DMP_NV_PARAMS;
mb->un.varDmp.region_id = DMP_REGION_23;
mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
+ mp = mbox->ctx_buf;
mb->un.varWords[3] = putPaddrLow(mp->phys);
mb->un.varWords[4] = putPaddrHigh(mp->phys);
return 0;
@@ -2326,7 +2376,7 @@ lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
rc = SUCCESS;
mbx_failed:
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, rc);
}
@@ -2338,30 +2388,25 @@ lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
(struct lpfc_rdp_context *)(mbox->ctx_ndlp);
if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
- goto error_mbuf_free;
+ goto error_mbox_free;
lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
DMP_SFF_PAGE_A2_SIZE);
- /* We don't need dma buffer for link stat. */
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
-
- memset(mbox, 0, sizeof(*mbox));
lpfc_read_lnk_stat(phba, mbox);
mbox->vport = rdp_context->ndlp->vport;
+
+ /* Save the dma buffer for cleanup in the final completion. */
+ mbox->ctx_buf = mp;
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
- goto error_cmd_free;
+ goto error_mbox_free;
return;
-error_mbuf_free:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
-error_cmd_free:
- lpfc_sli4_mbox_cmd_free(phba, mbox);
+error_mbox_free:
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, FAILURE);
}
@@ -2409,9 +2454,7 @@ lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
return;
error:
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- lpfc_sli4_mbox_cmd_free(phba, mbox);
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
rdp_context->cmpl(phba, rdp_context, FAILURE);
}
@@ -2427,27 +2470,19 @@ error:
int
lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
+ int rc;
struct lpfc_dmabuf *mp = NULL;
memset(mbox, 0, sizeof(*mbox));
- mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
- if (mp)
- mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
- if (!mp || !mp->virt) {
- kfree(mp);
+ rc = lpfc_mbox_rsrc_prep(phba, mbox);
+ if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
"3569 dump type 3 page 0xA0 allocation failed\n");
return 1;
}
- memset(mp->virt, 0, LPFC_BPL_SIZE);
- INIT_LIST_HEAD(&mp->list);
-
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
- /* save address for completion */
- mbox->ctx_buf = mp;
-
bf_set(lpfc_mbx_memory_dump_type3_type,
&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
bf_set(lpfc_mbx_memory_dump_type3_link,
@@ -2456,6 +2491,8 @@ lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
&mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
bf_set(lpfc_mbx_memory_dump_type3_length,
&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
+
+ mp = mbox->ctx_buf;
mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
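Taken together with the lpfc_init.c hunk earlier in this patch, the call sites now follow a single life cycle for a mailbox that carries a DMA buffer. The sketch below is condensed from the lpfc_sli4_async_fc_evt() hunk (logging elided):

pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb)
	return;
if (lpfc_mbox_rsrc_prep(phba, pmb))	/* allocates pmb->ctx_buf */
	goto out;	/* cleanup handles a missing ctx_buf */

lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;

if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) != MBX_NOT_FINISHED)
	return;		/* the completion handler now owns the resources */
out:
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);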
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 870e53b8f81d..89cbeba06aea 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -344,9 +344,12 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
phba->cgn_i = NULL;
}
- /* Free RX table */
- kfree(phba->rxtable);
- phba->rxtable = NULL;
+ /* Free RX Monitor */
+ if (phba->rx_monitor) {
+ lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ }
/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27263f02ab9f..b86ff9fcdf0c 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -171,12 +171,11 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
void *ptr = NULL;
- IOCB_t *irsp;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
- /* For lpfc_els_abort, context2 could be zero'ed to delay
+ /* For lpfc_els_abort, cmd_dmabuf could be zero'ed to delay
* freeing associated memory till after ABTS completes.
*/
if (pcmd) {
@@ -187,10 +186,16 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
}
} else {
- /* Force ulpStatus error since we are returning NULL ptr */
- if (!(irsp->ulpStatus)) {
- irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
- irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
+ /* Force ulp_status error since we are returning NULL ptr */
+ if (!(ulp_status)) {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(lpfc_wcqe_c_status, &rspiocb->wcqe_cmpl,
+ IOSTAT_LOCAL_REJECT);
+ rspiocb->wcqe_cmpl.parameter = IOERR_SLI_ABORTED;
+ } else {
+ rspiocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+ rspiocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
+ }
}
ptr = NULL;
}
@@ -324,6 +329,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_dmabuf *pcmd;
uint64_t nlp_portwwn = 0;
uint32_t *lp;
+ union lpfc_wqe128 *wqe;
IOCB_t *icmd;
struct serv_parm *sp;
uint32_t ed_tov;
@@ -333,9 +339,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct ls_rjt stat;
uint32_t vid, flag;
int rc;
+ u32 remote_did;
memset(&stat, 0, sizeof (struct ls_rjt));
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
if (wwn_to_u64(sp->portName.u.wwn) == 0) {
@@ -366,7 +373,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
NULL);
return 0;
}
- icmd = &cmdiocb->iocb;
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ wqe = &cmdiocb->wqe;
+ else
+ icmd = &cmdiocb->iocb;
/* PLOGI chkparm OK */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -456,7 +467,12 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
/* rcv'ed PLOGI decides what our NPortId will be */
- vport->fc_myDID = icmd->un.rcvels.parmRo;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ vport->fc_myDID = bf_get(els_rsp64_sid,
+ &cmdiocb->wqe.xmit_els_rsp);
+ } else {
+ vport->fc_myDID = icmd->un.rcvels.parmRo;
+ }
/* If there is an outstanding FLOGI, abort it now.
* The remote NPort is not going to ACC our FLOGI
@@ -497,6 +513,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_config_link(phba, link_mbox);
link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
link_mbox->vport = vport;
+
+ /* The default completion handling for CONFIG_LINK
+ * does not require the ndlp so no reference is needed.
+ */
link_mbox->ctx_ndlp = ndlp;
rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
@@ -537,7 +557,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
* always be deferring the ACC.
*/
- rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ remote_did = bf_get(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest);
+ else
+ remote_did = icmd->un.rcvels.remoteID;
+ rc = lpfc_reg_rpi(phba, vport->vpi, remote_did,
(uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
if (rc)
goto out;
@@ -571,7 +595,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* a default RPI.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- mempool_free(login_mbox, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, login_mbox,
+ MBOX_THD_UNLOCKED);
login_mbox = NULL;
} else {
/* In order to preserve RPIs, we want to cleanup
@@ -588,9 +613,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
- ndlp, login_mbox);
- if (rc)
- mempool_free(login_mbox, phba->mbox_mem_pool);
+ ndlp, login_mbox);
+ if (rc && login_mbox)
+ lpfc_mbox_rsrc_cleanup(phba, login_mbox,
+ MBOX_THD_UNLOCKED);
return 1;
}
@@ -611,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
+ if (!login_mbox->ctx_ndlp)
+ goto out;
+
login_mbox->context3 = save_iocb; /* For PLOGI ACC */
spin_lock_irq(&ndlp->lock);
@@ -619,8 +648,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Start the ball rolling by issuing REG_LOGIN here */
rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_nlp_put(ndlp);
goto out;
+ }
lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
return 1;
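The two reference-count changes in this hunk pair every lpfc_nlp_get() taken for an asynchronous mailbox with an lpfc_nlp_put() on the failure path: if lpfc_sli_issue_mbox() returns MBX_NOT_FINISHED the completion handler never runs, so the reference it would have dropped must be released immediately. Condensed from the hunk above:

login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);	/* ref for the completion */
if (!login_mbox->ctx_ndlp)
	goto out;

rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
	lpfc_nlp_put(ndlp);	/* completion will not run; drop the ref */
	goto out;
}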
@@ -674,17 +705,17 @@ static int
lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *elsiocb;
struct lpfc_dmabuf *pcmd;
struct serv_parm *sp;
struct lpfc_name *pnn, *ppn;
struct ls_rjt stat;
ADISC *ap;
- IOCB_t *icmd;
uint32_t *lp;
uint32_t cmd;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
lp = (uint32_t *) pcmd->virt;
cmd = *lp++;
@@ -698,8 +729,8 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ppn = (struct lpfc_name *) & sp->portName;
}
- icmd = &cmdiocb->iocb;
- if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
+ if (get_job_ulpstatus(phba, cmdiocb) == 0 &&
+ lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
/*
* As soon as we send ACC, the remote NPort can
@@ -710,7 +741,6 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
GFP_KERNEL);
if (elsiocb) {
-
/* Save info from cmd IOCB used in rsp */
memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
sizeof(struct lpfc_iocbq));
@@ -804,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_nvmet_invalidate_host(phba, ndlp);
if (ndlp->nlp_DID == Fabric_DID) {
- if (vport->port_state <= LPFC_FDISC)
+ if (vport->port_state <= LPFC_FDISC ||
+ vport->fc_flag & FC_PT2PT)
goto out;
lpfc_linkdown_port(vport);
spin_lock_irq(shost->host_lock);
@@ -893,7 +924,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
uint32_t *payload;
uint32_t cmd;
- payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ payload = cmdiocb->cmd_dmabuf->virt;
cmd = *payload;
if (vport->phba->nvmet_support) {
/* Must be a NVME PRLI */
@@ -930,9 +961,9 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct fc_rport *rport = ndlp->rport;
u32 roles;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
- lp = (uint32_t *) pcmd->virt;
- npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
+ pcmd = cmdiocb->cmd_dmabuf;
+ lp = (uint32_t *)pcmd->virt;
+ npr = (PRLI *)((uint8_t *)lp + sizeof(uint32_t));
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
@@ -1078,8 +1109,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_nlp_put(ndlp);
mempool_free(pmb, phba->mbox_mem_pool);
+ }
}
}
@@ -1193,7 +1226,7 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb = arg;
- struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
uint32_t *lp = (uint32_t *) pcmd->virt;
struct serv_parm *sp = (struct serv_parm *) (lp + 1);
struct ls_rjt stat;
@@ -1303,29 +1336,30 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- struct lpfc_dmabuf *pcmd, *prsp, *mp;
+ struct lpfc_dmabuf *pcmd, *prsp;
uint32_t *lp;
uint32_t vid, flag;
- IOCB_t *irsp;
struct serv_parm *sp;
uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
int rc;
+ u32 ulp_status;
+ u32 did;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
/* Recovery from PLOGI collision logic */
return ndlp->nlp_state;
}
- irsp = &rspiocb->iocb;
-
- if (irsp->ulpStatus)
+ if (ulp_status)
goto out;
- pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+ pcmd = cmdiocb->cmd_dmabuf;
prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
if (!prsp)
@@ -1434,7 +1468,9 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
goto out;
}
- if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+ did = get_job_els_rsp64_did(phba, cmdiocb);
+
+ if (lpfc_reg_rpi(phba, vport->vpi, did,
(uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
switch (ndlp->nlp_DID) {
case NameServer_DID:
@@ -1467,11 +1503,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
* command
*/
lpfc_nlp_put(ndlp);
- mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(mbox, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0134 PLOGI: cannot issue reg_login "
"Data: x%x x%x x%x x%x\n",
@@ -1664,17 +1696,18 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
ADISC *ap;
int rc;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
- irsp = &rspiocb->iocb;
- if ((irsp->ulpStatus) ||
+ if ((ulp_status) ||
(!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
/* 1 sec timeout */
mod_timer(&ndlp->nlp_delayfunc,
@@ -1821,7 +1854,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1841,16 +1873,11 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
list_del(&mb->list);
phba->sli.mboxq_cnt--;
- mempool_free(mb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
}
}
spin_unlock_irq(&phba->hbalock);
@@ -1955,8 +1982,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
@@ -2116,13 +2144,15 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_hba *phba = vport->phba;
- IOCB_t *irsp;
PRLI *npr;
struct lpfc_nvme_prli *nvpr;
void *temp_ptr;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
/* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
* format is different so NULL the two PRLI types so that the
@@ -2131,13 +2161,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
npr = NULL;
nvpr = NULL;
temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
- if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
+ if (cmdiocb->cmd_flag & LPFC_PRLI_FCP_REQ)
npr = (PRLI *) temp_ptr;
- else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
+ else if (cmdiocb->cmd_flag & LPFC_PRLI_NVME_REQ)
nvpr = (struct lpfc_nvme_prli *) temp_ptr;
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
+ if (ulp_status) {
if ((vport->port_type == LPFC_NPIV_PORT) &&
vport->cfg_restrict_login) {
goto out;
@@ -2736,16 +2765,18 @@ static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus) {
+ if (ulp_status)
return NLP_STE_FREED_NODE;
- }
+
return ndlp->nlp_state;
}
@@ -2753,14 +2784,16 @@ static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
@@ -2787,14 +2820,16 @@ static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb, *rspiocb;
- IOCB_t *irsp;
+ u32 ulp_status;
cmdiocb = (struct lpfc_iocbq *) arg;
- rspiocb = cmdiocb->context_un.rsp_iocb;
+ rspiocb = cmdiocb->rsp_iocb;
+
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
- irsp = &rspiocb->iocb;
- if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
+ if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
lpfc_drop_node(vport, ndlp);
return NLP_STE_FREED_NODE;
}
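The recurring edit in this file replaces direct reads of rspiocb->iocb.ulpStatus with get_job_ulpstatus(), which hides where the completion status lives on each SLI revision. The helper itself is defined elsewhere in the driver; judging only from the two layouts visible in the lpfc_check_elscmpl_iocb() hunk above, its shape is roughly the following sketch (sketch_ulpstatus is an illustrative name, not the real definition):

static inline u32 sketch_ulpstatus(struct lpfc_hba *phba,
				   struct lpfc_iocbq *iocbq)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		/* SLI-4: status is carried in the WQE completion entry */
		return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
	/* SLI-3: status is carried in the classic IOCB */
	return iocbq->iocb.ulpStatus;
}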
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 9601edd838e1..152245f7cacc 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+
+ if (!vport || vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
+
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
if (qhandle == NULL)
return -ENOMEM;
@@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
return -EINVAL;
remoteport = lpfc_rport->remoteport;
- if (!vport->localport)
+ if (!vport->localport ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -EINVAL;
lport = vport->localport->private;
@@ -313,8 +319,10 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp;
uint32_t status;
- pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
- ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
+ pnvme_lsreq = cmdwqe->context_un.nvme_lsreq;
+ ndlp = cmdwqe->ndlp;
+ buf_ptr = cmdwqe->bpl_dmabuf;
+
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -324,16 +332,16 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
- cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+ cmdwqe, pnvme_lsreq, cmdwqe->bpl_dmabuf,
+ ndlp);
lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
cmdwqe->sli4_xritag, status, wcqe->parameter);
- if (cmdwqe->context3) {
- buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
+ if (buf_ptr) {
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
- cmdwqe->context3 = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
}
if (pnvme_lsreq->done)
pnvme_lsreq->done(pnvme_lsreq, status);
@@ -345,18 +353,19 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
cmdwqe->sli4_xritag, status);
if (ndlp) {
lpfc_nlp_put(ndlp);
- cmdwqe->context1 = NULL;
+ cmdwqe->ndlp = NULL;
}
lpfc_sli_release_iocbq(phba, cmdwqe);
}
static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
struct lpfc_nvme_lport *lport;
uint32_t status;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
@@ -380,7 +389,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
struct lpfc_dmabuf *inp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_wcqe_complete *),
+ struct lpfc_iocbq *),
struct lpfc_nodelist *ndlp, uint32_t num_entry,
uint32_t tmo, uint8_t retry)
{
@@ -400,19 +409,19 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Initialize only 64 bytes */
memset(wqe, 0, sizeof(union lpfc_wqe));
- genwqe->context3 = (uint8_t *)bmp;
- genwqe->iocb_flag |= LPFC_IO_NVME_LS;
+ genwqe->bpl_dmabuf = bmp;
+ genwqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Save for completion so we can release these resources */
- genwqe->context1 = lpfc_nlp_get(ndlp);
- if (!genwqe->context1) {
+ genwqe->ndlp = lpfc_nlp_get(ndlp);
+ if (!genwqe->ndlp) {
dev_warn(&phba->pcidev->dev,
"Warning: Failed node ref, not sending LS_REQ\n");
lpfc_sli_release_iocbq(phba, genwqe);
return 1;
}
- genwqe->context2 = (uint8_t *)pnvme_lsreq;
+ genwqe->context_un.nvme_lsreq = pnvme_lsreq;
/* Fill in payload, bp points to frame payload */
if (!tmo)
@@ -432,7 +441,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
first_len = xmit_len;
}
- genwqe->rsvd2 = num_entry;
+ genwqe->num_bdes = num_entry;
genwqe->hba_wqidx = 0;
/* Words 0 - 2 */
@@ -483,8 +492,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
- genwqe->wqe_cmpl = cmpl;
- genwqe->iocb_cmpl = NULL;
+ genwqe->cmd_cmpl = cmpl;
genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
genwqe->vport = vport;
genwqe->retry = retry;
@@ -534,7 +542,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*gen_req_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe))
+ struct lpfc_iocbq *rspwqe))
{
struct lpfc_dmabuf *bmp;
struct ulp_bde64 *bpl;
@@ -559,6 +567,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_DID, ntype, nstate);
return -ENODEV;
}
+ if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+ return -ENODEV;
if (!vport->phba->sli4_hba.nvmels_wq)
return -ENOMEM;
@@ -662,7 +672,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
return -EINVAL;
vport = lport->vport;
- if (vport->load_flag & FC_UNLOADING)
+ if (vport->load_flag & FC_UNLOADING ||
+ vport->phba->hba_flag & HBA_IOQ_FLUSH)
return -ENODEV;
atomic_inc(&lport->fc4NvmeLsRequests);
@@ -721,8 +732,8 @@ __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(&phba->hbalock);
spin_lock(&pring->ring_lock);
list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
- if (wqe->context2 == pnvme_lsreq) {
- wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+ if (wqe->context_un.nvme_lsreq == pnvme_lsreq) {
+ wqe->cmd_flag |= LPFC_DRIVER_ABORTED;
foundit = true;
break;
}
@@ -906,7 +917,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
/*
- * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
+ * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
*
* Driver registers this routine as its io request handler. This
* routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
@@ -917,11 +928,11 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
* TODO: What are the failure codes.
**/
static void
-lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
- struct lpfc_wcqe_complete *wcqe)
+lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ struct lpfc_iocbq *pwqeOut)
{
- struct lpfc_io_buf *lpfc_ncmd =
- (struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
+ struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct nvmefc_fcp_req *nCmd;
struct nvme_fc_ersp_iu *ep;
@@ -1054,25 +1065,37 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = wcqe->parameter;
nCmd->status = 0;
+ /* Get the NVME cmd details for this unique error. */
+ cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
+ ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
+
/* Check if this is really an ERSP */
if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
lpfc_ncmd->status = IOSTAT_SUCCESS;
lpfc_ncmd->result = 0;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6084 NVME Completion ERSP: "
- "xri %x placed x%x\n",
- lpfc_ncmd->cur_iocbq.sli4_xritag,
- wcqe->total_data_placed);
+ "6084 NVME FCP_ERR ERSP: "
+ "xri %x placed x%x opcode x%x cmd_id "
+ "x%x cqe_status x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ wcqe->total_data_placed,
+ cp->sqe.common.opcode,
+ cp->sqe.common.command_id,
+ ep->cqe.status);
break;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6081 NVME Completion Protocol Error: "
"xri %x status x%x result x%x "
- "placed x%x\n",
+ "placed x%x opcode x%x cmd_id x%x, "
+ "cqe_status x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->status, lpfc_ncmd->result,
- wcqe->total_data_placed);
+ wcqe->total_data_placed,
+ cp->sqe.common.opcode,
+ cp->sqe.common.command_id,
+ ep->cqe.status);
break;
case IOSTAT_LOCAL_REJECT:
/* Let fall through to set command final state. */
@@ -1184,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
- struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+ struct nvme_common_command *sqe;
+ struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
union lpfc_wqe128 *wqe = &pwqeq->wqe;
uint32_t req_len;
@@ -1241,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
cstat->control_requests++;
}
- if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+ if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+ sqe = &((struct nvme_fc_cmd_iu *)
+ nCmd->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_async_event)
+ bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+ }
+
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -1268,6 +1298,19 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Words 13 14 15 are for PBDE support */
+ /* add the VMID tags as per switch response */
+ if (unlikely(lpfc_ncmd->cur_iocbq.cmd_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_priority_tagging) {
+ bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
+ lpfc_ncmd->cur_iocbq.vmid_tag.cs_ctl_vmid);
+ } else {
+ bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
+ wqe->words[31] = lpfc_ncmd->cur_iocbq.vmid_tag.app_id;
+ }
+ }
+
pwqeq->vport = vport;
return 0;
}
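The VMID hunk above programs one of two mutually exclusive tag formats into the WQE, selected by phba->pport->vmid_priority_tagging. The tag union it reads is assumed to look roughly like:

union lpfc_vmid_io_tag {
	u32 app_id;	/* application ID -> WQE word 31 (wqe_appid/wqe_wqes) */
	u8 cs_ctl_vmid;	/* CS_CTL priority value -> wqe_ccpe/wqe_ccp */
};

Only one member is meaningful per IO, which is why the two branches never touch the other format's WQE bits.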
@@ -1390,8 +1433,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
if ((nseg - 1) == i)
bf_set(lpfc_sli4_sge_last, sgl, 1);
- physaddr = data_sg->dma_address;
- dma_len = data_sg->length;
+ physaddr = sg_dma_address(data_sg);
+ dma_len = sg_dma_len(data_sg);
sgl->addr_lo = cpu_to_le32(
putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(
@@ -1493,6 +1536,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct nvme_common_command *sqe;
uint64_t start = 0;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u8 *uuid = NULL;
+ int err;
+ enum dma_data_direction iodir;
+#endif
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1515,7 +1563,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
phba = vport->phba;
- if (unlikely(vport->load_flag & FC_UNLOADING)) {
+ if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+ phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -1650,6 +1699,33 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->qidx = lpfc_queue_info->qidx;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ /* check the necessary and sufficient condition to support VMID */
+ if (lpfc_is_vmid_enabled(phba) &&
+ (ndlp->vmid_support ||
+ phba->pport->vmid_priority_tagging ==
+ LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
+ /* If the I/O was generated by a VM, get the associated */
+ /* virtual entity id */
+ uuid = nvme_fc_io_getuuid(pnvme_fcreq);
+
+ if (uuid) {
+ if (pnvme_fcreq->io_dir == NVMEFC_FCP_WRITE)
+ iodir = DMA_TO_DEVICE;
+ else if (pnvme_fcreq->io_dir == NVMEFC_FCP_READ)
+ iodir = DMA_FROM_DEVICE;
+ else
+ iodir = DMA_NONE;
+
+ err = lpfc_vmid_get_appid(vport, uuid, iodir,
+ (union lpfc_vmid_io_tag *)
+ &lpfc_ncmd->cur_iocbq.vmid_tag);
+ if (!err)
+ lpfc_ncmd->cur_iocbq.cmd_flag |= LPFC_IO_VMID;
+ }
+ }
+#endif
+
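The io_dir mapping above reuses enum dma_data_direction as the IO-direction cookie for the VMID lookup. Factored out, it is equivalent to this hypothetical helper (the name lpfc_nvme_iodir is illustrative, not from the patch):

static enum dma_data_direction lpfc_nvme_iodir(struct nvmefc_fcp_req *req)
{
	switch (req->io_dir) {
	case NVMEFC_FCP_WRITE:
		return DMA_TO_DEVICE;
	case NVMEFC_FCP_READ:
		return DMA_FROM_DEVICE;
	default:			/* NVMEFC_FCP_NODATA et al. */
		return DMA_NONE;
	}
}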
/*
* Issue the IO on the WQ indicated by index in the hw_queue_handle.
* This identifier was created in our hardware queue create callback
@@ -1730,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
* lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
- * @abts_cmpl: Pointer to wcqe complete object.
+ * @rspiocb: Pointer to response iocb object.
*
* This is the callback function for any NVME FCP IO that was aborted.
*
@@ -1739,15 +1815,16 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
**/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *abts_cmpl)
+ struct lpfc_iocbq *rspiocb)
{
+ struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"req_tag x%x, status x%x, hwstatus x%x\n",
- cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag,
+ bf_get(wqe_ctxt_tag, &cmdiocb->wqe.generic.wqe_com),
+ get_job_abtsiotag(phba, cmdiocb), cmdiocb->iotag,
bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
bf_get(lpfc_wcqe_c_status, abts_cmpl),
bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
@@ -1784,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
int ret_val;
+ struct nvme_fc_cmd_iu *cp;
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1866,7 +1944,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
}
/* Don't abort IOs no longer on the pending queue. */
- if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (!(nvmereq_wqe->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
@@ -1880,7 +1958,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
/* Outstanding abort is in progress */
- if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (nvmereq_wqe->cmd_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq x%px, "
@@ -1907,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
+ /*
+ * Get Command Id from cmd to plug into response. This
+ * code is not needed in the next NVME Transport drop.
+ */
+ cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x\n",
- nvmereq_wqe->sli4_xritag);
+ "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
+ nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
+ cp->sqe.common.command_id);
return;
out_unlock:
@@ -1975,8 +2059,8 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
- pwqeq->iocb_flag = LPFC_IO_NVME;
- pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
+ pwqeq->cmd_flag = LPFC_IO_NVME;
+ pwqeq->cmd_cmpl = lpfc_nvme_io_cmd_cmpl;
lpfc_ncmd->start_time = jiffies;
lpfc_ncmd->flags = 0;
@@ -2169,8 +2253,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_nvme = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
- if (!vport || !vport->localport ||
- !qp || !qp->io_wq)
+ if (!vport->localport || !qp || !qp->io_wq)
return;
pring = qp->io_wq->pring;
@@ -2180,8 +2263,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
abts_scsi += qp->abts_scsi_io_bufs;
abts_nvme += qp->abts_nvme_io_bufs;
}
- if (!vport || !vport->localport ||
- vport->phba->hba_flag & HBA_PCI_ERR)
+ if (!vport->localport ||
+ test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+ vport->load_flag & FC_UNLOADING)
return;
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2346,6 +2430,11 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
spin_lock_irq(&ndlp->lock);
+
+ /* If an oldrport exists, so does the ndlp reference. If not,
+ * a new reference is needed because either the node was never
+ * registered or it was unregistered and is being deleted.
+ */
oldrport = lpfc_ndlp_get_nrport(ndlp);
if (oldrport) {
prev_ndlp = oldrport->ndlp;
@@ -2456,12 +2545,12 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!nrport || !remoteport)
goto rescan_exit;
- /* Only rescan if we are an NVME target in the MAPPED state */
+ /* Rescan an NVME target in MAPPED state with DISCOVERY role set */
if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
nvme_fc_rescan_remoteport(remoteport);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6172 NVME rescanned DID x%06x "
"port_state x%x\n",
ndlp->nlp_DID, remoteport->port_state);
@@ -2541,8 +2630,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* return values is ignored. The upcall is a courtesy to the
* transport.
*/
- if (vport->load_flag & FC_UNLOADING ||
- unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
+ if (vport->load_flag & FC_UNLOADING)
(void)nvme_fc_set_remoteport_devloss(remoteport, 0);
ret = nvme_fc_unregister_remoteport(remoteport);
@@ -2708,7 +2796,7 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_wcqe_complete wcqe;
struct lpfc_wcqe_complete *wcqep = &wcqe;
- lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
+ lpfc_ncmd = pwqeIn->io_buf;
if (!lpfc_ncmd) {
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
@@ -2736,12 +2824,14 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
wcqep->word0 = 0;
bf_set(lpfc_wcqe_c_status, wcqep, stat);
wcqep->parameter = param;
+ wcqep->total_data_placed = 0;
wcqep->word3 = 0; /* xb is 0 */
/* Call release with XB=1 to queue the IO into the abort list. */
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
bf_set(lpfc_wcqe_c_xb, wcqep, 1);
- (pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
+ memcpy(&pwqeIn->wcqe_cmpl, wcqep, sizeof(*wcqep));
+ (pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);
#endif
}
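lpfc_nvme_cancel_iocb() no longer receives a separate response iocbq, so it fabricates one: the locally built WCQE is copied into the command's own wcqe_cmpl and the command is handed to cmd_cmpl as both arguments. Condensed from the code above:

	struct lpfc_wcqe_complete wcqe = { 0 };

	bf_set(lpfc_wcqe_c_status, &wcqe, stat);	/* synthesized status */
	wcqe.parameter = param;
	memcpy(&pwqeIn->wcqe_cmpl, &wcqe, sizeof(wcqe));
	(pwqeIn->cmd_cmpl)(phba, pwqeIn, pwqeIn);	/* cmd doubles as rsp */

lpfc_nvmet_wqfull_flush() in lpfc_nvmet.c below uses the same idiom when it flushes queued WQEs.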
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cc54ffb5c205..733c277948c0 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -234,7 +234,7 @@ int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct nvmefc_ls_req *pnvme_lsreq,
void (*gen_req_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe));
+ struct lpfc_iocbq *rspwqe));
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
@@ -248,6 +248,6 @@ int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
struct nvmefc_ls_rsp *ls_rsp,
void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe));
+ struct lpfc_iocbq *rspwqe));
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
- struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
+ struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 731802527b81..f7cfac0da9b6 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -285,7 +285,7 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba,
* transmission of an NVME LS response.
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. The function frees memory resources used for the command
@@ -293,9 +293,10 @@ lpfc_nvmet_defer_release(struct lpfc_hba *phba,
**/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
- struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
+ struct lpfc_async_xchg_ctx *axchg = cmdwqe->context_un.axchg;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
uint32_t status, result;
@@ -316,9 +317,9 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
status, result, axchg->oxid);
- lpfc_nlp_put(cmdwqe->context1);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ lpfc_nlp_put(cmdwqe->ndlp);
+ cmdwqe->context_un.axchg = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
ls_rsp->done(ls_rsp);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -331,7 +332,7 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME LS commands
@@ -340,10 +341,11 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
**/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_nvmet_tgtport *tgtp;
uint32_t status, result;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
if (!phba->targetport)
goto finish;
@@ -365,7 +367,7 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
}
finish:
- __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
+ __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, rspwqe);
}
/**
@@ -707,7 +709,7 @@ out:
* lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME FCP commands
@@ -715,17 +717,18 @@ out:
**/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_nvmet_tgtport *tgtp;
struct nvmefc_tgt_fcp_req *rsp;
struct lpfc_async_xchg_ctx *ctxp;
- uint32_t status, result, op, start_clean, logerr;
+ uint32_t status, result, op, logerr;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
int id;
#endif
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
ctxp->flag &= ~LPFC_NVME_IO_INP;
rsp = &ctxp->hdlrctx.fcp_req;
@@ -817,9 +820,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
} else {
ctxp->entry_cnt++;
- start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
- memset(((char *)cmdwqe) + start_clean, 0,
- (sizeof(struct lpfc_iocbq) - start_clean));
+ memset_startat(cmdwqe, 0, cmd_flag);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
ctxp->ts_isr_data = cmdwqe->isr_timestamp;
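memset_startat() replaces the open-coded offsetof arithmetic: it zeroes an object from the named member through the end, leaving everything laid out before that member intact for reuse. The call above is roughly equivalent to:

	size_t start = offsetof(struct lpfc_iocbq, cmd_flag);

	memset((char *)cmdwqe + start, 0, sizeof(*cmdwqe) - start);

with the bonus that the member name is checked against the struct at compile time.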
@@ -862,7 +863,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
struct nvmefc_ls_rsp *ls_rsp,
void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe))
+ struct lpfc_iocbq *rspwqe))
{
struct lpfc_hba *phba = axchg->phba;
struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
@@ -898,9 +899,9 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
}
/* Save numBdes for bpl2sgl */
- nvmewqeq->rsvd2 = 1;
+ nvmewqeq->num_bdes = 1;
nvmewqeq->hba_wqidx = 0;
- nvmewqeq->context3 = &dmabuf;
+ nvmewqeq->bpl_dmabuf = &dmabuf;
dmabuf.virt = &bpl;
bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
@@ -913,9 +914,8 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
* be referenced after it returns back to this routine.
*/
- nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
- nvmewqeq->iocb_cmpl = NULL;
- nvmewqeq->context2 = axchg;
+ nvmewqeq->cmd_cmpl = xmt_ls_rsp_cmp;
+ nvmewqeq->context_un.axchg = axchg;
lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
@@ -923,7 +923,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
/* clear to be sure there's no reference */
- nvmewqeq->context3 = NULL;
+ nvmewqeq->bpl_dmabuf = NULL;
if (rc == WQE_SUCCESS) {
/*
@@ -940,7 +940,7 @@ __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
rc = -ENXIO;
- lpfc_nlp_put(nvmewqeq->context1);
+ lpfc_nlp_put(nvmewqeq->ndlp);
out_free_buf:
/* Give back resources */
@@ -1072,10 +1072,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
- nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
- nvmewqeq->iocb_cmpl = NULL;
- nvmewqeq->context2 = ctxp;
- nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
+ nvmewqeq->cmd_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
+ nvmewqeq->context_un.axchg = ctxp;
+ nvmewqeq->cmd_flag |= LPFC_IO_NVMET;
ctxp->wqeq->hba_wqidx = rsp->hwqid;
lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
@@ -1118,8 +1117,8 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->oxid, rc);
ctxp->wqeq->hba_wqidx = 0;
- nvmewqeq->context2 = NULL;
- nvmewqeq->context3 = NULL;
+ nvmewqeq->context_un.axchg = NULL;
+ nvmewqeq->bpl_dmabuf = NULL;
rc = -EBUSY;
aerr:
return rc;
@@ -1275,7 +1274,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
* lpfc_nvmet_ls_req_cmp - completion handler for a nvme ls request
* @phba: Pointer to HBA context object
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* This function is the completion handler for NVME LS requests.
* The function updates any states and statistics, then calls the
@@ -1283,8 +1282,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
**/
static void
lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
__lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
}
@@ -1581,14 +1581,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
"6406 Ran out of NVMET iocb/WQEs\n");
return -ENOMEM;
}
- ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+ ctx_buf->iocbq->cmd_flag = LPFC_IO_NVMET;
nvmewqe = ctx_buf->iocbq;
wqe = &nvmewqe->wqe;
/* Initialize WQE */
memset(wqe, 0, sizeof(union lpfc_wqe));
- ctx_buf->iocbq->context1 = NULL;
+ ctx_buf->iocbq->cmd_dmabuf = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
spin_unlock(&phba->sli4_hba.sgl_list_lock);
@@ -2023,12 +2023,14 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
&wq->wqfull_list, list) {
if (ctxp) {
/* Checking for a specific IO to flush */
- if (nvmewqeq->context2 == ctxp) {
+ if (nvmewqeq->context_un.axchg == ctxp) {
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock,
iflags);
+ memcpy(&nvmewqeq->wcqe_cmpl, wcqep,
+ sizeof(*wcqep));
lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
- wcqep);
+ nvmewqeq);
return;
}
continue;
@@ -2036,7 +2038,8 @@ lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
/* Flush all IOs */
list_del(&nvmewqeq->list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
+ memcpy(&nvmewqeq->wcqe_cmpl, wcqep, sizeof(*wcqep));
+ lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
}
}
@@ -2066,7 +2069,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
+ ctxp = nvmewqeq->context_un.axchg;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
if (rc == -EBUSY) {
@@ -2612,10 +2615,10 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
ctxp->wqeq = nvmewqe;
/* prevent preparing wqe with NULL ndlp reference */
- nvmewqe->context1 = lpfc_nlp_get(ndlp);
- if (nvmewqe->context1 == NULL)
+ nvmewqe->ndlp = lpfc_nlp_get(ndlp);
+ if (!nvmewqe->ndlp)
goto nvme_wqe_free_wqeq_exit;
- nvmewqe->context2 = ctxp;
+ nvmewqe->context_un.axchg = ctxp;
wqe = &nvmewqe->wqe;
memset(wqe, 0, sizeof(union lpfc_wqe));
@@ -2676,7 +2679,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
- nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
+ nvmewqe->cmd_flag |= LPFC_IO_NVME_LS;
/* Xmit NVMET response to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
@@ -2687,8 +2690,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
return nvmewqe;
nvme_wqe_free_wqeq_exit:
- nvmewqe->context2 = NULL;
- nvmewqe->context3 = NULL;
+ nvmewqe->context_un.axchg = NULL;
+ nvmewqe->ndlp = NULL;
+ nvmewqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, nvmewqe);
return NULL;
}
@@ -2990,7 +2994,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->retry = 1;
nvmewqe->vport = phba->pport;
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
- nvmewqe->context1 = ndlp;
+ nvmewqe->ndlp = ndlp;
for_each_sg(rsp->sg, sgel, nsegs, i) {
physaddr = sg_dma_address(sgel);
@@ -3031,7 +3035,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
* lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
@@ -3039,15 +3043,16 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
**/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
unsigned long flags;
bool released = false;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3078,8 +3083,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@@ -3100,7 +3105,7 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for FCP cmds
@@ -3108,15 +3113,16 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
**/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
unsigned long flags;
uint32_t result;
bool released = false;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
if (!ctxp) {
@@ -3162,8 +3168,8 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
/*
* if transport has released ctx, then can reuse it. Otherwise,
* will be recycled by transport release call.
@@ -3181,7 +3187,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
* lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
* @phba: Pointer to HBA context object.
* @cmdwqe: Pointer to driver command WQE object.
- * @wcqe: Pointer to driver response CQE object.
+ * @rspwqe: Pointer to driver response WQE object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for NVME ABTS for LS cmds
@@ -3189,13 +3195,14 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
**/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *rspwqe)
{
struct lpfc_async_xchg_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
uint32_t result;
+ struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl;
- ctxp = cmdwqe->context2;
+ ctxp = cmdwqe->context_un.axchg;
result = wcqe->parameter;
if (phba->nvmet_support) {
@@ -3226,8 +3233,8 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
+ cmdwqe->rsp_dmabuf = NULL;
+ cmdwqe->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
kfree(ctxp);
}
@@ -3314,10 +3321,10 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
OTHER_COMMAND);
abts_wqeq->vport = phba->pport;
- abts_wqeq->context1 = ndlp;
- abts_wqeq->context2 = ctxp;
- abts_wqeq->context3 = NULL;
- abts_wqeq->rsvd2 = 0;
+ abts_wqeq->ndlp = ndlp;
+ abts_wqeq->context_un.axchg = ctxp;
+ abts_wqeq->bpl_dmabuf = NULL;
+ abts_wqeq->num_bdes = 0;
/* hba_wqidx should already be setup from command we are aborting */
abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
abts_wqeq->iocb.ulpLe = 1;
@@ -3328,46 +3335,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
return 1;
}
-/**
- * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
- * @pwqeq: Pointer to command iocb.
- * @xritag: Tag that uniqely identifies the local exchange resource.
- * @opt: Option bits -
- * bit 0 = inhibit sending abts on the link
- *
- * This function is called with hbalock held.
- **/
-static void
-lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
-{
- union lpfc_wqe128 *wqe = &pwqeq->wqe;
-
- /* WQEs are reused. Clear stale data and set key fields to
- * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
- */
- memset(wqe, 0, sizeof(*wqe));
-
- if (opt & INHIBIT_ABORT)
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- /* Abort specified xri tag, with the mask deliberately zeroed */
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-
- /* Abort the I/O associated with this outstanding exchange ID. */
- wqe->abort_cmd.wqe_com.abort_tag = xritag;
-
- /* iotag for the wqe completion. */
- bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
-
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
- bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
- bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-}
-
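The driver-private abort prep removed above is superseded by the common lpfc_sli_prep_abort_xri() helper, invoked later in this file with the xri, iotag, class, CQ id, and inhibit flag as explicit arguments. Condensed from the deleted body, these are the fields the replacement is assumed to program for the same inputs (abort_wqe_sketch is illustrative; the real helper lives in lpfc_sli.c):

static void abort_wqe_sketch(union lpfc_wqe128 *wqe, u16 xritag,
			     u16 iotag, bool ia, u16 cqid)
{
	memset(wqe, 0, sizeof(*wqe));		/* WQEs are reused */
	if (ia)					/* inhibit ABTS on the link */
		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	wqe->abort_cmd.wqe_com.abort_tag = xritag;	/* exchange to abort */
	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);	/* the 'true' arg */
	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
}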
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_async_xchg_ctx *ctxp,
@@ -3377,7 +3344,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
struct lpfc_iocbq *abts_wqeq;
struct lpfc_nodelist *ndlp;
unsigned long flags;
- u8 opt;
+ bool ia;
int rc;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -3417,7 +3384,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
abts_wqeq = ctxp->abort_wqeq;
ctxp->state = LPFC_NVME_STE_ABORT;
- opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
+ ia = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? true : false;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
/* Announce entry to new IO submit field. */
@@ -3446,7 +3413,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
/* Outstanding abort is in progress */
- if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (abts_wqeq->cmd_flag & LPFC_DRIVER_ABORTED) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -3461,16 +3428,17 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
}
/* Ready - mark outstanding as aborted by driver. */
- abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
+ abts_wqeq->cmd_flag |= LPFC_DRIVER_ABORTED;
- lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
+ lpfc_sli_prep_abort_xri(phba, abts_wqeq, ctxp->wqeq->sli4_xritag,
+ abts_wqeq->iotag, CLASS3,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, true);
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVME;
- abts_wqeq->context2 = ctxp;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVME;
+ abts_wqeq->context_un.axchg = ctxp;
abts_wqeq->vport = phba->pport;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
@@ -3526,9 +3494,8 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq = ctxp->wqeq;
- abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVMET;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
@@ -3612,9 +3579,8 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
}
spin_lock_irqsave(&phba->hbalock, flags);
- abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
- abts_wqeq->iocb_cmpl = NULL;
- abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
+ abts_wqeq->cmd_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
+ abts_wqeq->cmd_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
@@ -3625,8 +3591,8 @@ lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
out:
if (tgtp)
atomic_inc(&tgtp->xmt_abort_rsp_error);
- abts_wqeq->context2 = NULL;
- abts_wqeq->context3 = NULL;
+ abts_wqeq->rsp_dmabuf = NULL;
+ abts_wqeq->bpl_dmabuf = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6ccf573acdec..7a1563564df7 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -87,14 +87,6 @@ static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
- struct lpfc_vmid *vmp);
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
- *cmd, struct lpfc_vmid *vmp,
- union lpfc_vmid_io_tag *tag);
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
- struct lpfc_vmid *vmid);
/**
* lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
@@ -120,62 +112,6 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
#define LPFC_INVALID_REFTAG ((u32)-1)
/**
- * lpfc_update_stats - Update statistical data for the command completion
- * @vport: The virtual port on which this call is executing.
- * @lpfc_cmd: lpfc scsi command object pointer.
- *
- * This function is called when there is a command completion and this
- * function updates the statistical data for the command completion.
- **/
-static void
-lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
-{
- struct lpfc_hba *phba = vport->phba;
- struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
- struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- unsigned long flags;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- unsigned long latency;
- int i;
-
- if (!vport->stat_data_enabled ||
- vport->stat_data_blocked ||
- (cmd->result))
- return;
-
- latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
- rdata = lpfc_cmd->rdata;
- pnode = rdata->pnode;
-
- spin_lock_irqsave(shost->host_lock, flags);
- if (!pnode ||
- !pnode->lat_data ||
- (phba->bucket_type == LPFC_NO_BUCKET)) {
- spin_unlock_irqrestore(shost->host_lock, flags);
- return;
- }
-
- if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
- i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
- phba->bucket_step;
- /* check array subscript bounds */
- if (i < 0)
- i = 0;
- else if (i >= LPFC_MAX_BUCKET_COUNT)
- i = LPFC_MAX_BUCKET_COUNT - 1;
- } else {
- for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
- if (latency <= (phba->bucket_base +
- ((1<<i)*phba->bucket_step)))
- break;
- }
-
- pnode->lat_data[i].cmd_count++;
- spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
-/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
* @phba: The Hba for which this call is being executed.
*
@@ -362,7 +298,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
kfree(psb);
break;
}
- psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+ psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
psb->fcp_cmnd = psb->data;
psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
@@ -433,7 +369,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
iocb->ulpClass = CLASS3;
psb->status = IOSTAT_SUCCESS;
/* Put it back into the SCSI buffer list */
- psb->cur_iocbq.context1 = psb;
+ psb->cur_iocbq.io_buf = psb;
spin_lock_init(&psb->buf_lock);
lpfc_release_scsi_buf_s3(phba, psb);
@@ -468,7 +404,7 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
spin_lock(&qp->abts_io_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
&qp->lpfc_abts_io_buf_list, list) {
- if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
+ if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
continue;
if (psb->rdata && psb->rdata->pnode &&
@@ -524,7 +460,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
list_del_init(&psb->list);
psb->flags &= ~LPFC_SBUF_XBUSY;
psb->status = IOSTAT_SUCCESS;
- if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
+ if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
qp->abts_nvme_io_bufs--;
spin_unlock(&qp->abts_io_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -571,7 +507,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
* for command completion wake up the thread.
*/
spin_lock_irqsave(&psb->buf_lock, iflag);
- psb->cur_iocbq.iocb_flag &=
+ psb->cur_iocbq.cmd_flag &=
~LPFC_DRIVER_ABORTED;
if (psb->waitq)
wake_up(psb->waitq);
@@ -593,8 +529,8 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- (iocbq->iocb_flag & LPFC_IO_LIBDFC))
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ (iocbq->cmd_flag & LPFC_IO_LIBDFC))
continue;
if (iocbq->sli4_xritag != xri)
continue;
@@ -695,7 +631,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
- lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
lpfc_cmd->prot_seg_cnt = 0;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->timeout = 0;
@@ -783,7 +719,7 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
psb->pCmd = NULL;
- psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+ psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
@@ -931,7 +867,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
physaddr = sg_dma_address(sgel);
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
+ !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
nseg <= LPFC_EXT_DATA_BDE_COUNT) {
data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -959,7 +895,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
*/
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
- !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
+ !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
/*
* The extended IOCB format can only fit 3 BDE or a BPL.
@@ -2942,154 +2878,58 @@ out:
* -1 - Internal error (bad profile, ...etc)
*/
static int
-lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_wcqe_complete *wcqe)
+lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
+ struct lpfc_iocbq *pIocbOut)
{
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+ struct sli3_bg_fields *bgf;
int ret = 0;
- u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
+ struct lpfc_wcqe_complete *wcqe;
+ u32 status;
u32 bghm = 0;
u32 bgstat = 0;
u64 failing_sector = 0;
- if (status == CQE_STATUS_DI_ERROR) {
- if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
- bgstat |= BGS_GUARD_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* AppTag Check failed */
- bgstat |= BGS_APPTAG_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* RefTag Check failed */
- bgstat |= BGS_REFTAG_ERR_MASK;
-
- /* Check to see if there was any good data before the error */
- if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
- bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
- bghm = wcqe->total_data_placed;
- }
-
- /*
- * Set ALL the error bits to indicate we don't know what
- * type of error it is.
- */
- if (!bgstat)
- bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
- BGS_GUARD_ERR_MASK);
- }
-
- if (lpfc_bgs_get_guard_err(bgstat)) {
- ret = 1;
-
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
- set_host_byte(cmd, DID_ABORT);
- phba->bg_guard_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9059 BLKGRD: Guard Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
-
- if (lpfc_bgs_get_reftag_err(bgstat)) {
- ret = 1;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ wcqe = &pIocbOut->wcqe_cmpl;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
- set_host_byte(cmd, DID_ABORT);
+ if (status == CQE_STATUS_DI_ERROR) {
+ /* Guard Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
+ bgstat |= BGS_GUARD_ERR_MASK;
- phba->bg_reftag_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9060 BLKGRD: Ref Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
-
- if (lpfc_bgs_get_apptag_err(bgstat)) {
- ret = 1;
+ /* AppTag Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
+ bgstat |= BGS_APPTAG_ERR_MASK;
- scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
- set_host_byte(cmd, DID_ABORT);
+ /* RefTag Check failed */
+ if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
+ bgstat |= BGS_REFTAG_ERR_MASK;
- phba->bg_apptag_err_cnt++;
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9062 BLKGRD: App Tag error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
- }
-
- if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
- /*
- * setup sense data descriptor 0 per SPC-4 as an information
- * field, and put the failing LBA in it.
- * This code assumes there was also a guard/app/ref tag error
- * indication.
- */
- cmd->sense_buffer[7] = 0xc; /* Additional sense length */
- cmd->sense_buffer[8] = 0; /* Information descriptor type */
- cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
- cmd->sense_buffer[10] = 0x80; /* Validity bit */
+ /* Check to see if there was any good data before the
+ * error
+ */
+ if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+ bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
+ bghm = wcqe->total_data_placed;
+ }
- /* bghm is a "on the wire" FC frame based count */
- switch (scsi_get_prot_op(cmd)) {
- case SCSI_PROT_READ_INSERT:
- case SCSI_PROT_WRITE_STRIP:
- bghm /= cmd->device->sector_size;
- break;
- case SCSI_PROT_READ_STRIP:
- case SCSI_PROT_WRITE_INSERT:
- case SCSI_PROT_READ_PASS:
- case SCSI_PROT_WRITE_PASS:
- bghm /= (cmd->device->sector_size +
- sizeof(struct scsi_dif_tuple));
- break;
+ /*
+ * Set ALL the error bits to indicate we don't know what
+ * type of error it is.
+ */
+ if (!bgstat)
+ bgstat |= (BGS_REFTAG_ERR_MASK |
+ BGS_APPTAG_ERR_MASK |
+ BGS_GUARD_ERR_MASK);
}
- failing_sector = scsi_get_lba(cmd);
- failing_sector += bghm;
-
- /* Descriptor Information */
- put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
- }
-
- if (!ret) {
- /* No error was reported - problem in FW? */
- lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
- "9068 BLKGRD: Unknown error in cmd"
- " 0x%x lba 0x%llx blk cnt 0x%x "
- "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
- (unsigned long long)scsi_get_lba(cmd),
- scsi_logical_block_count(cmd), bgstat, bghm);
-
- /* Calculate what type of error it was */
- lpfc_calc_bg_err(phba, lpfc_cmd);
+ } else {
+ bgf = &pIocbOut->iocb.unsli3.sli3_bg;
+ bghm = bgf->bghm;
+ bgstat = bgf->bgstat;
}
- return ret;
-}
-
-/*
- * This function checks for BlockGuard errors detected by
- * the HBA. In case of errors, the ASC/ASCQ fields in the
- * sense buffer will be set accordingly, paired with
- * ILLEGAL_REQUEST to signal to the kernel that the HBA
- * detected corruption.
- *
- * Returns:
- * 0 - No error found
- * 1 - BlockGuard error found
- * -1 - Internal error (bad profile, ...etc)
- */
-static int
-lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
- struct lpfc_iocbq *pIocbOut)
-{
- struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
- struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
- int ret = 0;
- uint32_t bghm = bgf->bghm;
- uint32_t bgstat = bgf->bgstat;
- uint64_t failing_sector = 0;
if (lpfc_bgs_get_invalid_prof(bgstat)) {
cmd->result = DID_ERROR << 16;
@@ -3117,7 +2957,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_guard_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
set_host_byte(cmd, DID_ABORT);
phba->bg_guard_err_cnt++;
@@ -3131,10 +2970,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_reftag_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
set_host_byte(cmd, DID_ABORT);
-
phba->bg_reftag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9056 BLKGRD: Ref Tag error in cmd "
@@ -3146,10 +2983,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
if (lpfc_bgs_get_apptag_err(bgstat)) {
ret = 1;
-
scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
set_host_byte(cmd, DID_ABORT);
-
phba->bg_apptag_err_cnt++;
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
"9061 BLKGRD: App Tag error in cmd "
@@ -3434,7 +3269,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled) {
- lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->priority;
@@ -3591,15 +3426,15 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
switch (scsi_get_prot_op(scsi_cmnd)) {
case SCSI_PROT_WRITE_STRIP:
case SCSI_PROT_READ_STRIP:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
break;
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_INSERT:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
break;
case SCSI_PROT_WRITE_PASS:
case SCSI_PROT_READ_PASS:
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+ lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
break;
}
@@ -3630,7 +3465,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
*/
if ((phba->cfg_fof) && ((struct lpfc_device_data *)
scsi_cmnd->device->hostdata)->oas_enabled) {
- lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+ lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
/* Word 10 */
bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
@@ -3640,14 +3475,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
}
/* Word 7. DIF Flags */
- if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
+ if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
- else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
+ else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
- else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
+ else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
return 0;
@@ -3936,7 +3771,7 @@ lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
else
time = div_u64(time + 500, 1000); /* round it */
- cgs = this_cpu_ptr(phba->cmf_stat);
+ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
atomic64_add(size, &cgs->rcv_bytes);
atomic64_add(time, &cgs->rx_latency);
atomic_inc(&cgs->rx_io_cnt);
@@ -3980,7 +3815,7 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
atomic_set(&phba->rx_max_read_cnt, size);
}
- cgs = this_cpu_ptr(phba->cmf_stat);
+ cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
atomic64_add(size, &cgs->total_bytes);
return 0;
}
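this_cpu_ptr() is only valid with preemption disabled, and these statistics paths can run in preemptible context; per_cpu_ptr() with raw_smp_processor_id() tolerates migration, at worst crediting a counter to a stale CPU. A sketch of the distinction (the per-cpu type name lpfc_cgn_stat is assumed from surrounding lpfc code):

	struct lpfc_cgn_stat *cgs;

	/* needs preemption disabled (debug kernels will complain): */
	cgs = this_cpu_ptr(phba->cmf_stat);

	/* preemption-tolerant; good enough for statistics: */
	cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
	atomic64_add(size, &cgs->total_bytes);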
@@ -4173,7 +4008,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
* lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
* @phba: The hba for which this call is being executed.
* @pwqeIn: The command WQE for the scsi cmnd.
- * @wcqe: Pointer to driver response CQE object.
+ * @pwqeOut: Pointer to driver response WQE object.
*
* This routine assigns scsi command result by looking into response WQE
* status field appropriately. This routine handles QUEUE FULL condition as
@@ -4181,10 +4016,10 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
**/
static void
lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
- struct lpfc_wcqe_complete *wcqe)
+ struct lpfc_iocbq *pwqeOut)
{
- struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *)pwqeIn->context1;
+ struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
+ struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
struct lpfc_vport *vport = pwqeIn->vport;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
@@ -4194,7 +4029,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct Scsi_Host *shost;
u32 logit = LOG_FCP;
u32 status, idx;
- unsigned long iflags = 0;
u32 lat;
u8 wait_xb_clr = 0;
@@ -4209,30 +4043,16 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
rdata = lpfc_cmd->rdata;
ndlp = rdata->pnode;
- if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- /* TOREMOVE - currently this flag is checked during
- * the release of lpfc_iocbq. Remove once we move
- * to lpfc_wqe_job construct.
- *
- * This needs to be done outside buf_lock
- */
- spin_lock_irqsave(&phba->hbalock, iflags);
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-
- /* Guard against abort handler being called at same time */
- spin_lock(&lpfc_cmd->buf_lock);
-
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
if (!cmd) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"9042 I/O completion: Not an active IO\n");
- spin_unlock(&lpfc_cmd->buf_lock);
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
}
+ /* Guard against abort handler being called at same time */
+ spin_lock(&lpfc_cmd->buf_lock);
idx = lpfc_cmd->cur_iocbq.hba_wqidx;
if (phba->sli4_hba.hdwq)
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
@@ -4391,10 +4211,12 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
break;
}
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
+ lpfc_cmd->result == IOERR_LINK_DOWN ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+ lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
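Reclassifying these local-reject reasons (now including IOERR_LINK_DOWN and IOERR_RPI_SUSPENDED) from DID_REQUEUE to DID_TRANSPORT_DISRUPTED moves retry policy to the FC transport: retries are bounded by the rport's fast_io_fail/dev_loss window instead of being requeued indefinitely. The same substitution appears in the SLI-3 path (lpfc_scsi_cmd_iocb_cmpl) later in this file; the host byte is set the usual way:

	cmd->result = DID_TRANSPORT_DISRUPTED << 16;	/* transport-gated retry */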
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4406,12 +4228,14 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
* This is a response for a BG enabled
* cmd. Parse BG error
*/
- lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
- wcqe);
+ lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
break;
+ } else {
+ lpfc_printf_vlog(vport, KERN_WARNING,
+ LOG_BG,
+ "9040 non-zero BGSTAT "
+ "on unprotected cmd\n");
}
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
- "9040 non-zero BGSTAT on unprotected cmd\n");
}
lpfc_printf_vlog(vport, KERN_WARNING, logit,
"9036 Local Reject FCP cmd x%x failed"
@@ -4448,14 +4272,13 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"9039 Iodone <%d/%llu> cmd x%px, error "
- "x%x SNS x%x x%x Data: x%x x%x\n",
+ "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
- cmd->result, *lp, *(lp + 3), cmd->retries,
- scsi_get_resid(cmd));
+ cmd->result, *lp, *(lp + 3),
+ (u64)scsi_get_lba(cmd),
+ cmd->retries, scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
-
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4508,7 +4331,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4531,7 +4354,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut)
{
struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *) pIocbIn->context1;
+ (struct lpfc_io_buf *) pIocbIn->io_buf;
struct lpfc_vport *vport = pIocbIn->vport;
struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
struct lpfc_nodelist *pnode = rdata->pnode;
@@ -4568,7 +4391,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
/* pick up SLI4 exchange busy status from HBA */
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
- if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
+ if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -4681,7 +4504,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4736,7 +4559,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
scsi_get_resid(cmd));
}
- lpfc_update_stats(vport, lpfc_cmd);
if (vport->cfg_max_scsicmpl_time &&
time_after(jiffies, lpfc_cmd->start_time +
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4777,7 +4599,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
* wake up the thread.
*/
spin_lock(&lpfc_cmd->buf_lock);
- lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
if (lpfc_cmd->waitq)
wake_up(lpfc_cmd->waitq);
spin_unlock(&lpfc_cmd->buf_lock);
@@ -4854,9 +4676,9 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
piocbq->iocb.ulpFCP2Rcvy = 0;
piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
- piocbq->context1 = lpfc_cmd;
- if (!piocbq->iocb_cmpl)
- piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
+ piocbq->io_buf = lpfc_cmd;
+ if (!piocbq->cmd_cmpl)
+ piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
piocbq->iocb.ulpTimeout = tmo;
piocbq->vport = vport;
return 0;
@@ -4966,10 +4788,9 @@ static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
pwqeq->vport = vport;
- pwqeq->vport = vport;
- pwqeq->context1 = lpfc_cmd;
+ pwqeq->io_buf = lpfc_cmd;
pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
- pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
+ pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
return 0;
}
@@ -5016,7 +4837,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
}
/**
- * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
+ * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
* @vport: The virtual port for which this call is being executed.
* @lpfc_cmd: Pointer to lpfc_io_buf data structure.
* @lun: Logical unit number.
@@ -5030,10 +4851,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
* 1 - Success
**/
static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
- struct lpfc_io_buf *lpfc_cmd,
- uint64_t lun,
- uint8_t task_mgmt_cmd)
+lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd)
{
struct lpfc_iocbq *piocbq;
IOCB_t *piocb;
@@ -5054,15 +4874,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
- if (vport->phba->sli_rev == 3 &&
- !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+ if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
piocb->ulpCommand = CMD_FCP_ICMND64_CR;
piocb->ulpContext = ndlp->nlp_rpi;
- if (vport->phba->sli_rev == LPFC_SLI_REV4) {
- piocb->ulpContext =
- vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- }
piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
piocb->ulpPU = 0;
@@ -5078,8 +4893,79 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
} else
piocb->ulpTimeout = lpfc_cmd->timeout;
- if (vport->phba->sli_rev == LPFC_SLI_REV4)
- lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
+ return 1;
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates an FCP information unit corresponding to @task_mgmt_cmd
+ * for a device with the SLI-4 interface spec.
+ *
+ * Return codes:
+ * 0 - Error
+ * 1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
+ struct lpfc_io_buf *lpfc_cmd,
+ u64 lun, u8 task_mgmt_cmd)
+{
+ struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &pwqeq->wqe;
+ struct fcp_cmnd *fcp_cmnd;
+ struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
+ struct lpfc_nodelist *ndlp = rdata->pnode;
+
+ if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
+ return 0;
+
+ pwqeq->vport = vport;
+ /* Zero the entire 128-byte WQE */
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
+
+ /* From the icmnd template, initialize words 4 - 11 */
+ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
+ sizeof(uint32_t) * 8);
+
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+ /* Clear out any old data in the FCP command area */
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
+ fcp_cmnd->fcpCntl3 = 0;
+ fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+ bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */
+ vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
+ ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
+ bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
+ (ndlp->nlp_fcp_info & 0x0f));
+
+ /* ulpTimeout is only one byte */
+ if (lpfc_cmd->timeout > 0xff) {
+ /*
+ * Do not timeout the command at the firmware level.
+ * The driver will provide the timeout mechanism.
+ */
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
+ } else {
+ bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
+ }
+
+ lpfc_prep_embed_io(vport->phba, lpfc_cmd);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+ wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+ lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
return 1;
}
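
The task-management prep path is now split into explicit _s3 and _s4 variants, and the next two hunks install the matching routine in the HBA's ops table at API setup time, so issuing code calls one pointer and never tests sli_rev inline. A stripped-down model of that dispatch pattern (all types and names here are invented for illustration, not the lpfc structures):

    #include <stdio.h>

    struct hba {
        int sli_rev;
        int (*prep_task_mgmt)(struct hba *phba, unsigned char tmf);
    };

    static int prep_task_mgmt_s3(struct hba *phba, unsigned char tmf)
    {
        (void)phba;
        printf("SLI-3 prep, TMF 0x%02x\n", tmf);
        return 1;
    }

    static int prep_task_mgmt_s4(struct hba *phba, unsigned char tmf)
    {
        (void)phba;
        printf("SLI-4 prep, TMF 0x%02x\n", tmf);
        return 1;
    }

    /* Decide once at init; hot paths then call through the pointer. */
    static void api_table_setup(struct hba *phba)
    {
        phba->prep_task_mgmt = (phba->sli_rev == 4) ? prep_task_mgmt_s4
                                                    : prep_task_mgmt_s3;
    }

    int main(void)
    {
        struct hba phba = { .sli_rev = 4 };

        api_table_setup(&phba);
        phba.prep_task_mgmt(&phba, 0x10); /* no sli_rev test at the call site */
        return 0;
    }
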
@@ -5106,6 +4992,8 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
@@ -5113,6 +5001,8 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
+ phba->lpfc_scsi_prep_task_mgmt_cmd =
+ lpfc_scsi_prep_task_mgmt_cmd_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -5139,8 +5029,7 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
- struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *) cmdiocbq->context1;
+ struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
if (lpfc_cmd)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return;
@@ -5315,253 +5204,6 @@ void lpfc_poll_timeout(struct timer_list *t)
}
/*
- * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash: calculated hash value
- * @buf: uuid associated with the VE
- * Return the VMID entry associated with the UUID
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
- u32 hash, u8 *buf)
-{
- struct lpfc_vmid *vmp;
-
- hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
- if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
- return vmp;
- }
- return NULL;
-}
-
-/*
- * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @hash - calculated hash value
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- *
- * This routine will insert the newly acquired VMID entity in the hash table.
- * Make sure to acquire the appropriate lock before invoking this routine.
- */
-static void
-lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
- struct lpfc_vmid *vmp)
-{
- hash_add(vport->hash_table, &vmp->hnode, hash);
-}
-
-/*
- * lpfc_vmid_hash_fn - create a hash value of the UUID
- * @vmid: uuid associated with the VE
- * @len: length of the VMID string
- * Returns the calculated hash value
- */
-int lpfc_vmid_hash_fn(const char *vmid, int len)
-{
- int c;
- int hash = 0;
-
- if (len == 0)
- return 0;
- while (len--) {
- c = *vmid++;
- if (c >= 'A' && c <= 'Z')
- c += 'a' - 'A';
-
- hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
- (c >> LPFC_VMID_HASH_SHIFT)) * 19;
- }
-
- return hash & LPFC_VMID_HASH_MASK;
-}
-
-/*
- * lpfc_vmid_update_entry - update the vmid entry in the hash table
- * @vport: The virtual port for which this call is being executed.
- * @cmd: address of scsi cmd descriptor
- * @vmp: Pointer to a VMID entry representing a VM sending I/O
- * @tag: VMID tag
- */
-static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
- *cmd, struct lpfc_vmid *vmp,
- union lpfc_vmid_io_tag *tag)
-{
- u64 *lta;
-
- if (vport->vmid_priority_tagging)
- tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
- else
- tag->app_id = vmp->un.app_id;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- vmp->io_wr_cnt++;
- else
- vmp->io_rd_cnt++;
-
- /* update the last access timestamp in the table */
- lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
- *lta = jiffies;
-}
-
-static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
- struct lpfc_vmid *vmid)
-{
- u32 hash;
- struct lpfc_vmid *pvmid;
-
- if (vport->port_type == LPFC_PHYSICAL_PORT) {
- vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
- } else {
- hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
- pvmid =
- lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
- vmid->host_vmid);
- if (pvmid)
- vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
- else
- vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
- }
-}
-
-/*
- * lpfc_vmid_get_appid - get the VMID associated with the UUID
- * @vport: The virtual port for which this call is being executed.
- * @uuid: UUID associated with the VE
- * @cmd: address of scsi_cmd descriptor
- * @tag: VMID tag
- * Returns status of the function
- */
-static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
- scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
-{
- struct lpfc_vmid *vmp = NULL;
- int hash, len, rc, i;
-
- /* check if QFPA is complete */
- if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
- LPFC_VMID_QFPA_CMPL)) {
- vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
- return -EAGAIN;
- }
-
- /* search if the UUID has already been mapped to the VMID */
- len = strlen(uuid);
- hash = lpfc_vmid_hash_fn(uuid, len);
-
- /* search for the VMID in the table */
- read_lock(&vport->vmid_lock);
- vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
- /* if found, check if its already registered */
- if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
- read_unlock(&vport->vmid_lock);
- lpfc_vmid_update_entry(vport, cmd, vmp, tag);
- rc = 0;
- } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
- vmp->flag & LPFC_VMID_DE_REGISTER)) {
- /* else if register or dereg request has already been sent */
- /* Hence VMID tag will not be added for this I/O */
- read_unlock(&vport->vmid_lock);
- rc = -EBUSY;
- } else {
- /* The VMID was not found in the hashtable. At this point, */
- /* drop the read lock first before proceeding further */
- read_unlock(&vport->vmid_lock);
- /* start the process to obtain one as per the */
- /* type of the VMID indicated */
- write_lock(&vport->vmid_lock);
- vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
-
- /* while the read lock was released, in case the entry was */
- /* added by other context or is in process of being added */
- if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
- lpfc_vmid_update_entry(vport, cmd, vmp, tag);
- write_unlock(&vport->vmid_lock);
- return 0;
- } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
- write_unlock(&vport->vmid_lock);
- return -EBUSY;
- }
-
- /* else search and allocate a free slot in the hash table */
- if (vport->cur_vmid_cnt < vport->max_vmid) {
- for (i = 0; i < vport->max_vmid; i++) {
- vmp = vport->vmid + i;
- if (vmp->flag == LPFC_VMID_SLOT_FREE)
- break;
- }
- if (i == vport->max_vmid)
- vmp = NULL;
- } else {
- vmp = NULL;
- }
-
- if (!vmp) {
- write_unlock(&vport->vmid_lock);
- return -ENOMEM;
- }
-
- /* Add the vmid and register */
- lpfc_put_vmid_in_hashtable(vport, hash, vmp);
- vmp->vmid_len = len;
- memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
- vmp->io_rd_cnt = 0;
- vmp->io_wr_cnt = 0;
- vmp->flag = LPFC_VMID_SLOT_USED;
-
- vmp->delete_inactive =
- vport->vmid_inactivity_timeout ? 1 : 0;
-
- /* if type priority tag, get next available VMID */
- if (lpfc_vmid_is_type_priority_tag(vport))
- lpfc_vmid_assign_cs_ctl(vport, vmp);
-
- /* allocate the per cpu variable for holding */
- /* the last access time stamp only if VMID is enabled */
- if (!vmp->last_io_time)
- vmp->last_io_time = __alloc_percpu(sizeof(u64),
- __alignof__(struct
- lpfc_vmid));
- if (!vmp->last_io_time) {
- hash_del(&vmp->hnode);
- vmp->flag = LPFC_VMID_SLOT_FREE;
- write_unlock(&vport->vmid_lock);
- return -EIO;
- }
-
- write_unlock(&vport->vmid_lock);
-
- /* complete transaction with switch */
- if (lpfc_vmid_is_type_priority_tag(vport))
- rc = lpfc_vmid_uvem(vport, vmp, true);
- else
- rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
- if (!rc) {
- write_lock(&vport->vmid_lock);
- vport->cur_vmid_cnt++;
- vmp->flag |= LPFC_VMID_REQ_REGISTER;
- write_unlock(&vport->vmid_lock);
- } else {
- write_lock(&vport->vmid_lock);
- hash_del(&vmp->hnode);
- vmp->flag = LPFC_VMID_SLOT_FREE;
- free_percpu(vmp->last_io_time);
- write_unlock(&vport->vmid_lock);
- return -EIO;
- }
-
- /* finally, enable the idle timer once */
- if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
- mod_timer(&vport->phba->inactive_vmid_poll,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
- vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
- }
- }
- return rc;
-}
-
-/*
* lpfc_is_command_vm_io - get the UUID from blk cgroup
* @cmd: Pointer to scsi_cmnd data structure
* Returns UUID if present, otherwise NULL
@@ -5570,7 +5212,9 @@ static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
{
struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
- return bio ? blkcg_get_fc_appid(bio) : NULL;
+ if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
+ return NULL;
+ return blkcg_get_fc_appid(bio);
}
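
The added IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) test makes the appid lookup collapse to `return NULL` at compile time when the option is off, while the blkcg_get_fc_appid() call stays syntactically checked in every configuration. A self-contained model of how the kernel's IS_ENABLED() folds to a constant (simplified: the real macro also accepts =m for modules):

    #include <stdio.h>

    /* Userspace stand-in for the kernel's IS_ENABLED(): it evaluates to 1
     * only when the option macro is defined as 1, so the dead branch is
     * discarded at compile time. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_ENABLED(option) __is_defined(option)

    #define CONFIG_BLK_CGROUP_FC_APPID 1 /* comment out to flip branches */

    static const char *get_appid(void)
    {
        if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID))
            return NULL; /* this branch folds away when the option is on */
        return "app-id-from-cgroup";
    }

    int main(void)
    {
        const char *id = get_appid();

        printf("%s\n", id ? id : "(appid support compiled out)");
        return 0;
    }
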
/**
@@ -5591,6 +5235,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ struct lpfc_iocbq *cur_iocbq = NULL;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
struct lpfc_io_buf *lpfc_cmd;
@@ -5684,6 +5329,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
lpfc_cmd->rx_cmd_start = start;
+ cur_iocbq = &lpfc_cmd->cur_iocbq;
/*
* Store the midlayer's command structure for the completion phase
* and complete the command initialization.
@@ -5691,7 +5337,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
- lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
+ cur_iocbq->cmd_cmpl = NULL;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
@@ -5733,7 +5379,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
goto out_host_busy_free_buf;
}
-
/* check the necessary and sufficient condition to support VMID */
if (lpfc_is_vmid_enabled(phba) &&
(ndlp->vmid_support ||
@@ -5744,22 +5389,21 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
uuid = lpfc_is_command_vm_io(cmnd);
if (uuid) {
- err = lpfc_vmid_get_appid(vport, uuid, cmnd,
- (union lpfc_vmid_io_tag *)
- &lpfc_cmd->cur_iocbq.vmid_tag);
+ err = lpfc_vmid_get_appid(vport, uuid,
+ cmnd->sc_data_direction,
+ (union lpfc_vmid_io_tag *)
+ &cur_iocbq->vmid_tag);
if (!err)
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
+ cur_iocbq->cmd_flag |= LPFC_IO_VMID;
}
}
- atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
/* Issue I/O to adapter */
- err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
- &lpfc_cmd->cur_iocbq,
+ err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (start) {
@@ -5772,25 +5416,25 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
#endif
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "3376 FCP could not issue IOCB err %x "
- "FCP cmd x%x <%d/%llu> "
- "sid: x%x did: x%x oxid: x%x "
- "Data: x%x x%x x%x x%x\n",
- err, cmnd->cmnd[0],
- cmnd->device ? cmnd->device->id : 0xffff,
- cmnd->device ? cmnd->device->lun : (u64)-1,
- vport->fc_myDID, ndlp->nlp_DID,
- phba->sli_rev == LPFC_SLI_REV4 ?
- lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
- phba->sli_rev == LPFC_SLI_REV4 ?
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
- lpfc_cmd->cur_iocbq.iocb.ulpContext,
- lpfc_cmd->cur_iocbq.iotag,
- phba->sli_rev == LPFC_SLI_REV4 ?
- bf_get(wqe_tmo,
- &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
- lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
- (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
+ "3376 FCP could not issue iocb err %x "
+ "FCP cmd x%x <%d/%llu> "
+ "sid: x%x did: x%x oxid: x%x "
+ "Data: x%x x%x x%x x%x\n",
+ err, cmnd->cmnd[0],
+ cmnd->device ? cmnd->device->id : 0xffff,
+ cmnd->device ? cmnd->device->lun : (u64)-1,
+ vport->fc_myDID, ndlp->nlp_DID,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ cur_iocbq->sli4_xritag : 0xffff,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
+ cur_iocbq->iocb.ulpContext,
+ cur_iocbq->iotag,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ bf_get(wqe_tmo,
+ &cur_iocbq->wqe.generic.wqe_com) :
+ cur_iocbq->iocb.ulpTimeout,
+ (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
goto out_host_busy_free_buf;
}
@@ -5886,6 +5530,7 @@ static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocb;
@@ -5897,7 +5542,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
@@ -5905,25 +5550,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (!lpfc_cmd)
return ret;
- spin_lock_irqsave(&phba->hbalock, flags);
+ /* Guard against IO completion being called at the same time */
+ spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
+
+ spin_lock(&phba->hbalock);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3168 SCSI Layer abort requested I/O has been "
"flushed by LLD.\n");
ret = FAILED;
- goto out_unlock;
+ goto out_unlock_hba;
}
- /* Guard against IO completion being called at same time */
- spin_lock(&lpfc_cmd->buf_lock);
-
if (!lpfc_cmd->pCmd) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
"x%x ID %d LUN %llu\n",
SUCCESS, cmnd->device->id, cmnd->device->lun);
- goto out_unlock_buf;
+ goto out_unlock_hba;
}
iocb = &lpfc_cmd->cur_iocbq;
@@ -5931,12 +5576,12 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
if (!pring_s4) {
ret = FAILED;
- goto out_unlock_buf;
+ goto out_unlock_hba;
}
spin_lock(&pring_s4->ring_lock);
}
/* the command is in process of being cancelled */
- if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3169 SCSI Layer abort requested I/O has been "
"cancelled by LLD.\n");
@@ -5956,16 +5601,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
goto out_unlock_ring;
}
- BUG_ON(iocb->context1 != lpfc_cmd);
+ WARN_ON(iocb->io_buf != lpfc_cmd);
/* abort issued in recovery is still in progress */
- if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
+ if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"3389 SCSI Layer I/O Abort Request is pending\n");
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
goto wait_for_cmpl;
}
@@ -5973,7 +5618,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (phba->sli_rev == LPFC_SLI_REV4) {
spin_unlock(&pring_s4->ring_lock);
ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
- lpfc_sli4_abort_fcp_cmpl);
+ lpfc_sli_abort_fcp_cmpl);
} else {
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
@@ -5986,15 +5631,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (ret_val != IOCB_SUCCESS) {
/* Indicate the IO is not being aborted by the driver. */
lpfc_cmd->waitq = NULL;
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
ret = FAILED;
- goto out;
+ goto out_unlock_hba;
}
/* no longer need the lock after this point */
- spin_unlock(&lpfc_cmd->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
@@ -6002,7 +5645,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
wait_for_cmpl:
/*
- * iocb_flag is set to LPFC_DRIVER_ABORTED before we wait
+ * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
* for abort to complete.
*/
wait_event_timeout(waitq,
@@ -6029,10 +5672,9 @@ wait_for_cmpl:
out_unlock_ring:
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
-out_unlock_buf:
- spin_unlock(&lpfc_cmd->buf_lock);
-out_unlock:
- spin_unlock_irqrestore(&phba->hbalock, flags);
+out_unlock_hba:
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0749 SCSI Layer I/O Abort Request Status x%x ID %d "
@@ -6139,7 +5781,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
/**
* lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
* @vport: The virtual port for which this call is being executed.
- * @cmnd: Pointer to scsi_cmnd data structure.
+ * @rport: Pointer to remote port
* @tgt_id: Target ID of remote device.
* @lun_id: Lun number for the TMF
* @task_mgmt_cmd: type of TMF to send
@@ -6152,7 +5794,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
* 0x2002 - Success.
**/
static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
unsigned int tgt_id, uint64_t lun_id,
uint8_t task_mgmt_cmd)
{
@@ -6165,21 +5807,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
int ret;
int status;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode)
return FAILED;
pnode = rdata->pnode;
- lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
+ lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
if (lpfc_cmd == NULL)
return FAILED;
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
lpfc_cmd->rdata = rdata;
- lpfc_cmd->pCmd = cmnd;
+ lpfc_cmd->pCmd = NULL;
lpfc_cmd->ndlp = pnode;
- status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
- task_mgmt_cmd);
+ status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
+ task_mgmt_cmd);
if (!status) {
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
@@ -6191,38 +5833,41 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
- iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
+ iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
+ iocbq->vport = vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %llu "
"rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
- iocbq->iocb_flag);
+ iocbq->cmd_flag);
status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if ((status != IOCB_SUCCESS) ||
- (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
+ (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
if (status != IOCB_SUCCESS ||
- iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
+ get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0727 TMF %s to TGT %d LUN %llu "
- "failed (%d, %d) iocb_flag x%x\n",
+ "failed (%d, %d) cmd_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id,
- iocbqrsp->iocb.ulpStatus,
- iocbqrsp->iocb.un.ulpWord[4],
- iocbq->iocb_flag);
+ get_job_ulpstatus(phba, iocbqrsp),
+ get_job_word4(phba, iocbqrsp),
+ iocbq->cmd_flag);
/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
if (status == IOCB_SUCCESS) {
- if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+ if (get_job_ulpstatus(phba, iocbqrsp) ==
+ IOSTAT_FCP_RSP_ERROR)
/* Something in the FCP_RSP was invalid.
* Check conditions */
ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
else
ret = FAILED;
- } else if (status == IOCB_TIMEDOUT) {
+ } else if ((status == IOCB_TIMEDOUT) ||
+ (status == IOCB_ABORTED)) {
ret = TIMEOUT_ERROR;
} else {
ret = FAILED;
@@ -6232,7 +5877,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
lpfc_sli_release_iocbq(phba, iocbqrsp);
- if (ret != TIMEOUT_ERROR)
+ if (status != IOCB_TIMEDOUT)
lpfc_release_scsi_buf(phba, lpfc_cmd);
return ret;
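
lpfc_send_taskmgmt now reads completion status through get_job_ulpstatus()/get_job_word4() instead of dereferencing iocbqrsp->iocb directly, because on SLI-4 the status lives in the WCQE rather than the legacy IOCB. Pushing the revision test into one accessor keeps every call site format-agnostic; roughly (structures pared down to the fields that matter, names invented):

    #include <stdio.h>

    /* Pared-down stand-ins for the two completion formats. */
    struct iocb { unsigned int ulpStatus; }; /* SLI-3 response IOCB */
    struct wcqe { unsigned int status; };    /* SLI-4 completion CQE */

    struct iocbq {
        struct iocb iocb;
        struct wcqe wcqe_cmpl;
    };

    struct hba { int sli_rev; };

    /* One accessor hides the format difference from every caller. */
    static unsigned int get_job_ulpstatus(struct hba *phba, struct iocbq *rsp)
    {
        if (phba->sli_rev == 4)
            return rsp->wcqe_cmpl.status;
        return rsp->iocb.ulpStatus;
    }

    int main(void)
    {
        struct hba phba = { .sli_rev = 4 };
        struct iocbq rsp = { .wcqe_cmpl = { .status = 3 } };

        printf("ulpstatus = %u\n", get_job_ulpstatus(&phba, &rsp));
        return 0;
    }
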
@@ -6241,7 +5886,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
/**
* lpfc_chk_tgt_mapped -
* @vport: The virtual port to check on
- * @cmnd: Pointer to scsi_cmnd data structure.
+ * @rport: Pointer to fc_rport data structure.
*
* This routine delays until the scsi target (aka rport) for the
* command exists (is present and logged in) or we declare it non-existent.
@@ -6251,19 +5896,20 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
* 0x2002 - Success
**/
static int
-lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
+lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
{
struct lpfc_rport_data *rdata;
- struct lpfc_nodelist *pnode;
+ struct lpfc_nodelist *pnode = NULL;
unsigned long later;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0797 Tgt Map rport failure: rdata x%px\n", rdata);
return FAILED;
}
pnode = rdata->pnode;
+
/*
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
@@ -6275,7 +5921,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
return SUCCESS;
schedule_timeout_uninterruptible(msecs_to_jiffies(500));
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ rdata = rport->dd_data;
if (!rdata)
return FAILED;
pnode = rdata->pnode;
@@ -6346,6 +5992,7 @@ static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
@@ -6355,7 +6002,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
int status;
u32 logit = LOG_FCP;
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rport)
+ return FAILED;
+
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0798 Device Reset rdata failure: rdata x%px\n",
@@ -6363,11 +6013,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
return FAILED;
}
pnode = rdata->pnode;
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
- status = lpfc_chk_tgt_mapped(vport, cmnd);
+ status = lpfc_chk_tgt_mapped(vport, rport);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0721 Device Reset rport failure: rdata x%px\n", rdata);
@@ -6383,7 +6033,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_LUN_RESET);
if (status != SUCCESS)
logit = LOG_TRACE_EVENT;
@@ -6420,6 +6070,7 @@ static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
@@ -6432,7 +6083,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
unsigned long flags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+ if (!rport)
+ return FAILED;
+
+ rdata = rport->dd_data;
if (!rdata || !rdata->pnode) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0799 Target Reset rdata failure: rdata x%px\n",
@@ -6440,11 +6094,11 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
return FAILED;
}
pnode = rdata->pnode;
- status = fc_block_scsi_eh(cmnd);
+ status = fc_block_rport(rport);
if (status != 0 && status != SUCCESS)
return status;
- status = lpfc_chk_tgt_mapped(vport, cmnd);
+ status = lpfc_chk_tgt_mapped(vport, rport);
if (status == FAILED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0722 Target Reset rport failure: rdata x%px\n", rdata);
@@ -6468,7 +6122,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
- status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
+ status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
FCP_TARGET_RESET);
if (status != SUCCESS) {
logit = LOG_TRACE_EVENT;
@@ -6527,95 +6181,6 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
}
/**
- * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
- * @cmnd: Pointer to scsi_cmnd data structure.
- *
- * This routine does target reset to all targets on @cmnd->device->host.
- * This emulates Parallel SCSI Bus Reset Semantics.
- *
- * Return code :
- * 0x2003 - Error
- * 0x2002 - Success
- **/
-static int
-lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
-{
- struct Scsi_Host *shost = cmnd->device->host;
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_nodelist *ndlp = NULL;
- struct lpfc_scsi_event_header scsi_event;
- int match;
- int ret = SUCCESS, status, i;
- u32 logit = LOG_FCP;
-
- scsi_event.event_type = FC_REG_SCSI_EVENT;
- scsi_event.subcategory = LPFC_EVENT_BUSRESET;
- scsi_event.lun = 0;
- memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
- memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
-
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
-
- status = fc_block_scsi_eh(cmnd);
- if (status != 0 && status != SUCCESS)
- return status;
-
- /*
- * Since the driver manages a single bus device, reset all
- * targets known to the driver. Should any target reset
- * fail, this routine returns failure to the midlayer.
- */
- for (i = 0; i < LPFC_MAX_TARGET; i++) {
- /* Search for mapped node by target ID */
- match = 0;
- spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
-
- if (vport->phba->cfg_fcp2_no_tgt_reset &&
- (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
- continue;
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
- ndlp->nlp_sid == i &&
- ndlp->rport &&
- ndlp->nlp_type & NLP_FCP_TARGET) {
- match = 1;
- break;
- }
- }
- spin_unlock_irq(shost->host_lock);
- if (!match)
- continue;
-
- status = lpfc_send_taskmgmt(vport, cmnd,
- i, 0, FCP_TARGET_RESET);
-
- if (status != SUCCESS) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0700 Bus Reset on target %d failed\n",
- i);
- ret = FAILED;
- }
- }
- /*
- * We have to clean up i/o as : they may be orphaned by the TMFs
- * above; or if any of the TMFs failed, they may be in an
- * indeterminate state.
- * We will report success if all the i/o aborts successfully.
- */
-
- status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
- if (status != SUCCESS)
- ret = FAILED;
- if (ret == FAILED)
- logit = LOG_TRACE_EVENT;
-
- lpfc_printf_vlog(vport, KERN_ERR, logit,
- "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
- return ret;
-}
-
-/**
* lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
* @cmnd: Pointer to scsi_cmnd data structure.
*
@@ -7181,12 +6746,6 @@ lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
static int
-lpfc_no_handler(struct scsi_cmnd *cmnd)
-{
- return FAILED;
-}
-
-static int
lpfc_no_slave(struct scsi_device *sdev)
{
return -ENODEV;
@@ -7198,11 +6757,6 @@ struct scsi_host_template lpfc_template_nvme = {
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_no_command,
- .eh_abort_handler = lpfc_no_handler,
- .eh_device_reset_handler = lpfc_no_handler,
- .eh_target_reset_handler = lpfc_no_handler,
- .eh_bus_reset_handler = lpfc_no_handler,
- .eh_host_reset_handler = lpfc_no_handler,
.slave_alloc = lpfc_no_slave,
.slave_configure = lpfc_no_slave,
.scan_finished = lpfc_scan_finished,
@@ -7226,7 +6780,6 @@ struct scsi_host_template lpfc_template = {
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
.eh_host_reset_handler = lpfc_host_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
@@ -7241,3 +6794,30 @@ struct scsi_host_template lpfc_template = {
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
+
+struct scsi_host_template lpfc_vport_template = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .proc_name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
+ .eh_should_retry_cmd = fc_eh_should_retry_cmd,
+ .eh_abort_handler = lpfc_abort_handler,
+ .eh_device_reset_handler = lpfc_device_reset_handler,
+ .eh_target_reset_handler = lpfc_target_reset_handler,
+ .eh_bus_reset_handler = NULL,
+ .eh_host_reset_handler = NULL,
+ .slave_alloc = lpfc_slave_alloc,
+ .slave_configure = lpfc_slave_configure,
+ .slave_destroy = lpfc_slave_destroy,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
+ .cmd_per_lun = LPFC_CMD_PER_LUN,
+ .shost_groups = lpfc_vport_groups,
+ .max_sectors = 0xFFFFFFFF,
+ .vendor_id = 0,
+ .change_queue_depth = scsi_change_queue_depth,
+ .track_queue_depth = 1,
+};
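
A dedicated lpfc_vport_template is introduced rather than reusing lpfc_template: vports keep command and device/target reset entry points but write the bus and host reset handlers as explicit NULLs, since adapter-wide recovery belongs to the physical port. Spelling the NULLs out documents the contract; omitting the members would zero-initialize them to the same effect. In miniature (simplified stand-in types, not scsi_host_template):

    #include <stdio.h>

    struct host_template {
        const char *name;
        int (*eh_device_reset)(void);
        int (*eh_host_reset)(void); /* NULL: not offered at this level */
    };

    static int dev_reset(void)  { return 0x2002; }
    static int host_reset(void) { return 0x2002; }

    /* Physical port: owns every recovery level. */
    static const struct host_template phys_template = {
        .name            = "lpfc",
        .eh_device_reset = dev_reset,
        .eh_host_reset   = host_reset,
    };

    /* Virtual port: no host-reset hook, so the midlayer must escalate
     * through the physical port instead. */
    static const struct host_template vport_template = {
        .name            = "lpfc",
        .eh_device_reset = dev_reset,
        .eh_host_reset   = 0,
    };

    int main(void)
    {
        (void)phys_template;
        printf("vport has host reset handler: %s\n",
               vport_template.eh_host_reset ? "yes" : "no");
        return 0;
    }
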
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 3836d7f6a575..eae56944f31b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -126,10 +126,6 @@ struct fcp_cmnd {
};
-struct lpfc_scsicmd_bkt {
- uint32_t cmd_count;
-};
-
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
#define MDAC_DIRECT_CMD 0x22
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 5dedb3de271d..99d06dc7ddf6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -70,8 +70,9 @@ static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint8_t *, uint32_t *);
-static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
- struct lpfc_iocbq *);
+static struct lpfc_iocbq *
+lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
@@ -89,17 +90,14 @@ static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
struct lpfc_queue *cq,
struct lpfc_cqe *cqe);
+static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *pwqeq,
+ struct lpfc_sglq *sglq);
union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;
-static IOCB_t *
-lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
-{
- return &iocbq->iocb;
-}
-
/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
@@ -1251,29 +1249,24 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct lpfc_sglq *start_sglq = NULL;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_nodelist *ndlp;
- struct lpfc_sli_ring *pring = NULL;
int found = 0;
+ u8 cmnd;
- if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
- pring = phba->sli4_hba.nvmels_wq->pring;
- else
- pring = lpfc_phba_elsring(phba);
+ cmnd = get_job_cmnd(phba, piocbq);
- lockdep_assert_held(&pring->ring_lock);
-
- if (piocbq->iocb_flag & LPFC_IO_FCP) {
- lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
+ if (piocbq->cmd_flag & LPFC_IO_FCP) {
+ lpfc_cmd = piocbq->io_buf;
ndlp = lpfc_cmd->rdata->pnode;
- } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
- !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
- ndlp = piocbq->context_un.ndlp;
- } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
- if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
+ } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
+ !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
+ ndlp = piocbq->ndlp;
+ } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
+ if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
ndlp = NULL;
else
- ndlp = piocbq->context_un.ndlp;
+ ndlp = piocbq->ndlp;
} else {
- ndlp = piocbq->context1;
+ ndlp = piocbq->ndlp;
}
spin_lock(&phba->sli4_hba.sgl_list_lock);
@@ -1380,7 +1373,7 @@ static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
struct lpfc_sglq *sglq;
- size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+ size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
unsigned long iflag = 0;
struct lpfc_sli_ring *pring;
@@ -1391,7 +1384,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (sglq) {
- if (iocbq->iocb_flag & LPFC_IO_NVMET) {
+ if (iocbq->cmd_flag & LPFC_IO_NVMET) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
iflag);
sglq->state = SGL_FREED;
@@ -1403,7 +1396,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
goto out;
}
- if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+ if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
(!(unlikely(pci_channel_offline(phba->pcidev)))) &&
sglq->state != SGL_XRI_ABORTED) {
spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
@@ -1440,7 +1433,7 @@ out:
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
- iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
+ iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
LPFC_IO_NVME_LS);
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
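
In __lpfc_sli_release_iocbq_s4 above, start_clean moves from offsetof(struct lpfc_iocbq, iocb) to offsetof(struct lpfc_iocbq, wqe), so recycling an iocbq scrubs everything from the wqe member to the end of the structure while preserving the fields that precede it (such as the list linkage). The offsetof partial-clear idiom in isolation (field names invented):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct request {
        int   tag;         /* allocator-owned: must survive recycling */
        void *list_next;   /* allocator-owned: must survive recycling */
        char  payload[32]; /* per-use state: must be scrubbed */
        int   flags;       /* per-use state: must be scrubbed */
    };

    /* Clear everything from 'payload' to the end of the struct in one
     * memset, leaving the fields before it untouched. */
    static void recycle(struct request *rq)
    {
        size_t start_clean = offsetof(struct request, payload);

        memset((char *)rq + start_clean, 0, sizeof(*rq) - start_clean);
    }

    int main(void)
    {
        struct request rq = { .tag = 7, .flags = 0xff };

        recycle(&rq);
        printf("tag=%d flags=%d\n", rq.tag, rq.flags); /* tag=7 flags=0 */
        return 0;
    }
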
@@ -1530,17 +1523,21 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
- if (piocb->wqe_cmpl) {
- if (piocb->iocb_flag & LPFC_IO_NVME)
+ if (piocb->cmd_cmpl) {
+ if (piocb->cmd_flag & LPFC_IO_NVME) {
lpfc_nvme_cancel_iocb(phba, piocb,
ulpstatus, ulpWord4);
- else
- lpfc_sli_release_iocbq(phba, piocb);
-
- } else if (piocb->iocb_cmpl) {
- piocb->iocb.ulpStatus = ulpstatus;
- piocb->iocb.un.ulpWord[4] = ulpWord4;
- (piocb->iocb_cmpl) (phba, piocb, piocb);
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(lpfc_wcqe_c_status,
+ &piocb->wcqe_cmpl, ulpstatus);
+ piocb->wcqe_cmpl.parameter = ulpWord4;
+ } else {
+ piocb->iocb.ulpStatus = ulpstatus;
+ piocb->iocb.un.ulpWord[4] = ulpWord4;
+ }
+ (piocb->cmd_cmpl) (phba, piocb, piocb);
+ }
} else {
lpfc_sli_release_iocbq(phba, piocb);
}
@@ -1724,20 +1721,18 @@ static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
- if (phba->sli_rev == LPFC_SLI_REV4)
- lockdep_assert_held(&pring->ring_lock);
- else
- lockdep_assert_held(&phba->hbalock);
+ u32 ulp_command = 0;
BUG_ON(!piocb);
+ ulp_command = get_job_cmnd(phba, piocb);
list_add_tail(&piocb->list, &pring->txcmplq);
- piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt++;
-
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
- (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
- (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
+ (ulp_command != CMD_ABORT_XRI_WQE) &&
+ (ulp_command != CMD_ABORT_XRI_CN) &&
+ (ulp_command != CMD_CLOSE_XRI_CN)) {
BUG_ON(!piocb->vport);
if (!(piocb->vport->load_flag & FC_UNLOADING))
mod_timer(&piocb->vport->els_tmofunc,
@@ -1773,7 +1768,7 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
* lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
* @phba: Pointer to HBA context object.
* @cmdiocb: Pointer to driver command iocb object.
- * @cmf_cmpl: Pointer to completed WCQE.
+ * @rspiocb: Pointer to driver response iocb object.
*
* This routine will inform the driver of any BW adjustments we need
* to make. These changes will be picked up during the next CMF
@@ -1782,10 +1777,11 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
**/
static void
lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *cmf_cmpl)
+ struct lpfc_iocbq *rspiocb)
{
union lpfc_wqe128 *wqe;
uint32_t status, info;
+ struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
uint64_t bw, bwdif, slop;
uint64_t pcent, bwpcent;
int asig, afpin, sigcnt, fpincnt;
@@ -1793,22 +1789,22 @@ lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
char *s;
/* First check for error */
- status = bf_get(lpfc_wcqe_c_status, cmf_cmpl);
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
if (status) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6211 CMF_SYNC_WQE Error "
"req_tag x%x status x%x hwstatus x%x "
"tdatap x%x parm x%x\n",
- bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl),
- bf_get(lpfc_wcqe_c_status, cmf_cmpl),
- bf_get(lpfc_wcqe_c_hw_status, cmf_cmpl),
- cmf_cmpl->total_data_placed,
- cmf_cmpl->parameter);
+ bf_get(lpfc_wcqe_c_request_tag, wcqe),
+ bf_get(lpfc_wcqe_c_status, wcqe),
+ bf_get(lpfc_wcqe_c_hw_status, wcqe),
+ wcqe->total_data_placed,
+ wcqe->parameter);
goto out;
}
/* Gather congestion information on a successful cmpl */
- info = cmf_cmpl->parameter;
+ info = wcqe->parameter;
phba->cmf_active_info = info;
/* See if firmware info count is valid or has changed */
@@ -1817,15 +1813,15 @@ lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
else
phba->cmf_info_per_interval = info;
- tdp = bf_get(lpfc_wcqe_c_cmf_bw, cmf_cmpl);
- cg = bf_get(lpfc_wcqe_c_cmf_cg, cmf_cmpl);
+ tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
+ cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
/* Get BW requirement from firmware */
bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
if (!bw) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6212 CMF_SYNC_WQE x%x: NULL bw\n",
- bf_get(lpfc_wcqe_c_request_tag, cmf_cmpl));
+ bf_get(lpfc_wcqe_c_request_tag, wcqe));
goto out;
}
@@ -1920,6 +1916,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
unsigned long iflags;
u32 ret_val;
u32 atot, wtot, max;
+ u16 warn_sync_period = 0;
/* First address any alarm / warning activity */
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
@@ -1934,7 +1931,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
sync_buf = __lpfc_sli_get_iocbq(phba);
if (!sync_buf) {
lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
- "6213 No available WQEs for CMF_SYNC_WQE\n");
+ "6244 No available WQEs for CMF_SYNC_WQE\n");
ret_val = ENOMEM;
goto out_unlock;
}
@@ -1974,10 +1971,14 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
lpfc_acqe_cgn_frequency;
bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
+ warn_sync_period = lpfc_acqe_cgn_frequency;
} else {
/* We hit a FPIN warning condition */
bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
+ if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
+ warn_sync_period =
+ LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
}
}
@@ -1993,25 +1994,27 @@ initpath:
bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
+ bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
sync_buf->vport = phba->pport;
- sync_buf->wqe_cmpl = lpfc_cmf_sync_cmpl;
- sync_buf->iocb_cmpl = NULL;
- sync_buf->context1 = NULL;
- sync_buf->context2 = NULL;
- sync_buf->context3 = NULL;
+ sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
+ sync_buf->cmd_dmabuf = NULL;
+ sync_buf->rsp_dmabuf = NULL;
+ sync_buf->bpl_dmabuf = NULL;
sync_buf->sli4_xritag = NO_XRI;
- sync_buf->iocb_flag |= LPFC_IO_CMF;
+ sync_buf->cmd_flag |= LPFC_IO_CMF;
ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
- if (ret_val)
+ if (ret_val) {
lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
ret_val);
+ __lpfc_sli_release_iocbq(phba, sync_buf);
+ }
out_unlock:
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret_val;
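
The hunk above also plugs a leak: when lpfc_sli4_issue_wqe fails, the just-acquired sync_buf is now handed back via __lpfc_sli_release_iocbq instead of being dropped on the floor. The underlying rule: until submission succeeds, ownership never transfers to the hardware path, so the allocating function must release on the failure branch. In outline (userspace stand-ins for the lpfc allocator):

    #include <stdio.h>
    #include <stdlib.h>

    struct iocbq { int iotag; };

    static struct iocbq *get_iocbq(void) { return calloc(1, sizeof(struct iocbq)); }
    static void release_iocbq(struct iocbq *q) { free(q); }
    static int issue_wqe(struct iocbq *q) { (void)q; return -1; /* simulate failure */ }

    static int issue_sync_wqe(void)
    {
        struct iocbq *sync_buf = get_iocbq();
        int ret;

        if (!sync_buf)
            return -1;

        ret = issue_wqe(sync_buf);
        if (ret) {
            /* Submission failed, so ownership never transferred: the
             * allocator must release the buffer here or it leaks. */
            release_iocbq(sync_buf);
        }
        return ret;
    }

    int main(void)
    {
        printf("issue_sync_wqe -> %d (no leak on failure)\n", issue_sync_wqe());
        return 0;
    }
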
@@ -2173,7 +2176,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* Set up an iotag
*/
- nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
+ nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
if (pring->ringno == LPFC_ELS_RING) {
@@ -2194,9 +2197,9 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/*
* If there is no completion routine to call, we can release the
* IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
- * that have no rsp ring completion, iocb_cmpl MUST be NULL.
+ * that have no rsp ring completion, cmd_cmpl MUST be NULL.
*/
- if (nextiocb->iocb_cmpl)
+ if (nextiocb->cmd_cmpl)
lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
else
__lpfc_sli_release_iocbq(phba, nextiocb);
@@ -2833,6 +2836,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
+void
+lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ __lpfc_sli_rpi_release(vport, ndlp);
+}
+
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
* @phba: Pointer to HBA context object.
@@ -2853,13 +2862,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint16_t rpi, vpi;
int rc;
- mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
-
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
-
/*
* If a REG_LOGIN succeeded after node is destroyed or node
* is in re-discovery driver need to cleanup the RPI.
@@ -2867,6 +2869,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (!(phba->pport->load_flag & FC_UNLOADING) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
+ if (mp) {
+ pmb->ctx_buf = NULL;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -2892,8 +2900,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
lpfc_nlp_put(ndlp);
- pmb->ctx_buf = NULL;
- pmb->ctx_ndlp = NULL;
}
if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
@@ -2944,7 +2950,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
}
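
The lpfc_sli_def_mbox_cmpl hunks above stop freeing the mailbox ctx_buf unconditionally on entry; the buffer is now freed, and the pointer cleared, only on the REG_LOGIN64 success path, with the common exit routed through lpfc_mbox_rsrc_cleanup(). Clearing the pointer at the point of free is what lets several cleanup paths coexist without a double-free; schematically (userspace stand-ins, not the lpfc API):

    #include <stdlib.h>

    struct mbox {
        void *ctx_buf; /* optional buffer attached to the command */
    };

    /* Free the attached buffer exactly once: clearing the pointer turns
     * any later cleanup pass into a no-op instead of a double-free
     * (free(NULL) is defined to do nothing). */
    static void mbox_free_ctx_buf(struct mbox *pmb)
    {
        free(pmb->ctx_buf);
        pmb->ctx_buf = NULL;
    }

    static void mbox_rsrc_cleanup(struct mbox *pmb)
    {
        mbox_free_ctx_buf(pmb); /* safe even if a success path freed it */
        free(pmb);
    }

    int main(void)
    {
        struct mbox *pmb = calloc(1, sizeof(*pmb));

        if (!pmb)
            return 1;
        pmb->ctx_buf = malloc(64);
        mbox_free_ctx_buf(pmb); /* e.g. the REG_LOGIN success path */
        mbox_rsrc_cleanup(pmb); /* common exit: no double-free */
        return 0;
    }
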
/**
* lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
@@ -3196,7 +3202,7 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
uint32_t oxid, sid, did, fctl, size;
int ret = 1;
- d_buf = piocb->context2;
+ d_buf = piocb->cmd_dmabuf;
nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
fc_hdr = nvmebuf->hbuf.virt;
@@ -3359,6 +3365,56 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
return 0;
}
+static void
+lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
+ struct lpfc_iocbq *saveq)
+{
+ IOCB_t *irsp;
+ union lpfc_wqe128 *wqe;
+ u16 i = 0;
+
+ irsp = &saveq->iocb;
+ wqe = &saveq->wqe;
+
+ /* Fill wcqe with the IOCB status fields */
+ bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
+ saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
+ saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
+ saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
+
+ /* Source ID */
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
+
+ /* rx-id of the response frame */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
+
+ /* ox-id of the frame */
+ bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+ irsp->unsli3.rcvsli3.ox_id);
+
+ /* DID */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+ irsp->un.rcvels.remoteID);
+
+ /* unsol data len */
+ for (i = 0; i < irsp->ulpBdeCount; i++) {
+ struct lpfc_hbq_entry *hbqe = NULL;
+
+ if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ if (i == 0) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &irsp->un.ulpWord[0];
+ saveq->wqe.gen_req.bde.tus.f.bdeSize =
+ hbqe->bde.tus.f.bdeSize;
+ } else if (i == 1) {
+ hbqe = (struct lpfc_hbq_entry *)
+ &irsp->unsli3.sli3Words[4];
+ saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
+ }
+ }
+ }
+}
+
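
The new lpfc_sli_prep_unsol_wqe() back-fills a WQE/WCQE view of an unsolicited frame from the legacy SLI-3 receive IOCB, so code already converted to the unified get_job_*/WCQE accessors can consume SLI-3 traffic unchanged. It is a one-way field translation done once at the seam; a cut-down illustration (struct layouts invented for the sketch):

    #include <stdio.h>

    /* Legacy receive descriptor and the newer view built from it. */
    struct legacy_rcv {
        unsigned int   status;
        unsigned int   acc_len;
        unsigned short ox_id;
    };

    struct unified_cmpl {
        unsigned int   status;
        unsigned int   total_data_placed;
        unsigned short ox_id;
    };

    /* Translate once at the boundary so every downstream consumer reads
     * only the unified format, whatever the hardware produced. */
    static void prep_unified(const struct legacy_rcv *irsp,
                             struct unified_cmpl *out)
    {
        out->status            = irsp->status;
        out->total_data_placed = irsp->acc_len;
        out->ox_id             = irsp->ox_id;
    }

    int main(void)
    {
        struct legacy_rcv irsp = { .status = 0, .acc_len = 512,
                                   .ox_id = 0x1234 };
        struct unified_cmpl cq;

        prep_unified(&irsp, &cq);
        printf("len=%u oxid=0x%x\n", cq.total_data_placed, cq.ox_id);
        return 0;
    }
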
/**
* lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
* @phba: Pointer to HBA context object.
@@ -3379,11 +3435,13 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
IOCB_t * irsp;
WORD5 * w5p;
+ dma_addr_t paddr;
uint32_t Rctl, Type;
struct lpfc_iocbq *iocbq;
struct lpfc_dmabuf *dmzbuf;
- irsp = &(saveq->iocb);
+ irsp = &saveq->iocb;
+ saveq->vport = phba->pport;
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
if (pring->lpfc_sli_rcv_async_status)
@@ -3401,22 +3459,22 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
- (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
+ (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
if (irsp->ulpBdeCount > 0) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->un.ulpWord[3]);
+ irsp->un.ulpWord[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 1) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->unsli3.sli3Words[3]);
+ irsp->unsli3.sli3Words[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 2) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
- irsp->unsli3.sli3Words[7]);
+ irsp->unsli3.sli3Words[7]);
lpfc_in_buf_free(phba, dmzbuf);
}
@@ -3425,9 +3483,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
if (irsp->ulpBdeCount != 0) {
- saveq->context2 = lpfc_sli_get_buff(phba, pring,
+ saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
irsp->un.ulpWord[3]);
- if (!saveq->context2)
+ if (!saveq->cmd_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3437,9 +3495,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
- saveq->context3 = lpfc_sli_get_buff(phba, pring,
+ saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[7]);
- if (!saveq->context3)
+ if (!saveq->bpl_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3449,11 +3507,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->unsli3.sli3Words[7]);
}
list_for_each_entry(iocbq, &saveq->list, list) {
- irsp = &(iocbq->iocb);
+ irsp = &iocbq->iocb;
if (irsp->ulpBdeCount != 0) {
- iocbq->context2 = lpfc_sli_get_buff(phba, pring,
+ iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
+ pring,
irsp->un.ulpWord[3]);
- if (!iocbq->context2)
+ if (!iocbq->cmd_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3463,9 +3522,10 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
- iocbq->context3 = lpfc_sli_get_buff(phba, pring,
+ iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
+ pring,
irsp->unsli3.sli3Words[7]);
- if (!iocbq->context3)
+ if (!iocbq->bpl_dmabuf)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
@@ -3476,7 +3536,20 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
irsp->unsli3.sli3Words[7]);
}
}
+ } else {
+ paddr = getPaddr(irsp->un.cont64[0].addrHigh,
+ irsp->un.cont64[0].addrLow);
+ saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
+ paddr);
+ if (irsp->ulpBdeCount == 2) {
+ paddr = getPaddr(irsp->un.cont64[1].addrHigh,
+ irsp->un.cont64[1].addrLow);
+ saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
+ pring,
+ paddr);
+ }
}
+
if (irsp->ulpBdeCount != 0 &&
(irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
@@ -3494,12 +3567,14 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (!found)
list_add_tail(&saveq->clist,
&pring->iocb_continue_saveq);
+
if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
list_del_init(&iocbq->clist);
saveq = iocbq;
- irsp = &(saveq->iocb);
- } else
+ irsp = &saveq->iocb;
+ } else {
return 0;
+ }
}
if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
(irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
@@ -3522,6 +3597,19 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
}
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+ (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
+ irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
+ if (irsp->unsli3.rcvsli3.vpi == 0xffff)
+ saveq->vport = phba->pport;
+ else
+ saveq->vport = lpfc_find_vport_by_vpid(phba,
+ irsp->unsli3.rcvsli3.vpi);
+ }
+
+ /* Prepare WQE with Unsol frame */
+ lpfc_sli_prep_unsol_wqe(phba, saveq);
+
if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0313 Ring %d handler: unexpected Rctl x%x "
@@ -3550,36 +3638,28 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
struct lpfc_iocbq *prspiocb)
{
struct lpfc_iocbq *cmd_iocb = NULL;
- uint16_t iotag;
- spinlock_t *temp_lock = NULL;
- unsigned long iflag = 0;
+ u16 iotag;
if (phba->sli_rev == LPFC_SLI_REV4)
- temp_lock = &pring->ring_lock;
+ iotag = get_wqe_reqtag(prspiocb);
else
- temp_lock = &phba->hbalock;
-
- spin_lock_irqsave(temp_lock, iflag);
- iotag = prspiocb->iocb.ulpIoTag;
+ iotag = prspiocb->iocb.ulpIoTag;
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
- spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}
- spin_unlock_irqrestore(temp_lock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0317 iotag x%x is out of "
- "range: max iotag x%x wd0 x%x\n",
- iotag, phba->sli.last_iotag,
- *(((uint32_t *) &prspiocb->iocb) + 7));
+ "range: max iotag x%x\n",
+ iotag, phba->sli.last_iotag);
return NULL;
}
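
lpfc_sli_iocbq_lookup (and lpfc_sli_iocbq_lookup_by_tag below) no longer take the ring or hba lock internally; the lpfc_sli_process_sol_iocb hunk further down shows the caller wrapping the call in the appropriate lock, so the lookup and the list removal it performs sit inside one caller-owned critical section. The usual way to keep a caller-holds-the-lock contract honest is an assertion at entry, modeled here with a plain flag in place of a spinlock:

    #include <assert.h>
    #include <stdio.h>

    struct ring {
        int lock_held; /* stand-in for a real spinlock */
        int lookup[8];
    };

    /* Must be entered with ring->lock held by the caller, so the lookup
     * and whatever the caller does with the result are atomic together. */
    static int iocbq_lookup(struct ring *pring, int iotag)
    {
        assert(pring->lock_held); /* kernel analogue: lockdep_assert_held() */
        return pring->lookup[iotag];
    }

    int main(void)
    {
        struct ring pring = { .lookup = { [3] = 42 } };

        pring.lock_held = 1; /* caller takes the lock... */
        printf("iotag 3 -> %d\n", iocbq_lookup(&pring, 3));
        pring.lock_held = 0; /* ...and releases it after use */
        return 0;
    }
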
@@ -3600,33 +3680,23 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint16_t iotag)
{
struct lpfc_iocbq *cmd_iocb = NULL;
- spinlock_t *temp_lock = NULL;
- unsigned long iflag = 0;
- if (phba->sli_rev == LPFC_SLI_REV4)
- temp_lock = &pring->ring_lock;
- else
- temp_lock = &phba->hbalock;
-
- spin_lock_irqsave(temp_lock, iflag);
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
- if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+ if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
/* remove from txcmpl queue list */
list_del_init(&cmd_iocb->list);
- cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
pring->txcmplq_cnt--;
- spin_unlock_irqrestore(temp_lock, iflag);
return cmd_iocb;
}
}
- spin_unlock_irqrestore(temp_lock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0372 iotag x%x lookup error: max iotag (x%x) "
- "iocb_flag x%x\n",
+ "cmd_flag x%x\n",
iotag, phba->sli.last_iotag,
- cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
+ cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
return NULL;
}
@@ -3652,20 +3722,38 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
struct lpfc_iocbq *cmdiocbp;
- int rc = 1;
unsigned long iflag;
+ u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+ ulp_command = get_job_cmnd(phba, saveq);
+ ulp_status = get_job_ulpstatus(phba, saveq);
+ ulp_word4 = get_job_word4(phba, saveq);
+ ulp_context = get_job_ulpcontext(phba, saveq);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ iotag = get_wqe_reqtag(saveq);
+ else
+ iotag = saveq->iocb.ulpIoTag;
+
if (cmdiocbp) {
- if (cmdiocbp->iocb_cmpl) {
+ ulp_command = get_job_cmnd(phba, cmdiocbp);
+ if (cmdiocbp->cmd_cmpl) {
/*
* If an ELS command failed send an event to mgmt
* application.
*/
- if (saveq->iocb.ulpStatus &&
+ if (ulp_status &&
(pring->ringno == LPFC_ELS_RING) &&
- (cmdiocbp->iocb.ulpCommand ==
- CMD_ELS_REQUEST64_CR))
+ (ulp_command == CMD_ELS_REQUEST64_CR))
lpfc_send_els_failure_event(phba,
cmdiocbp, saveq);
@@ -3675,11 +3763,11 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
if (pring->ringno == LPFC_ELS_RING) {
if ((phba->sli_rev < LPFC_SLI_REV4) &&
- (cmdiocbp->iocb_flag &
+ (cmdiocbp->cmd_flag &
LPFC_DRIVER_ABORTED)) {
spin_lock_irqsave(&phba->hbalock,
iflag);
- cmdiocbp->iocb_flag &=
+ cmdiocbp->cmd_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
@@ -3694,12 +3782,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(&phba->hbalock,
iflag);
- saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
+ saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(&phba->hbalock,
iflag);
}
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (saveq->iocb_flag &
+ if (saveq->cmd_flag &
LPFC_EXCHANGE_BUSY) {
/* Set cmdiocb flag for the
* exchange busy so sgl (xri)
@@ -3709,12 +3797,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(
&phba->hbalock, iflag);
- cmdiocbp->iocb_flag |=
+ cmdiocbp->cmd_flag |=
LPFC_EXCHANGE_BUSY;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
}
- if (cmdiocbp->iocb_flag &
+ if (cmdiocbp->cmd_flag &
LPFC_DRIVER_ABORTED) {
/*
* Clear LPFC_DRIVER_ABORTED
@@ -3723,34 +3811,34 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
spin_lock_irqsave(
&phba->hbalock, iflag);
- cmdiocbp->iocb_flag &=
+ cmdiocbp->cmd_flag &=
~LPFC_DRIVER_ABORTED;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
- cmdiocbp->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- cmdiocbp->iocb.un.ulpWord[4] =
- IOERR_ABORT_REQUESTED;
+ set_job_ulpstatus(cmdiocbp,
+ IOSTAT_LOCAL_REJECT);
+ set_job_ulpword4(cmdiocbp,
+ IOERR_ABORT_REQUESTED);
/*
- * For SLI4, irsiocb contains
+ * For SLI4, irspiocb contains
* NO_XRI in sli_xritag, it
* shall not affect releasing
* sgl (xri) process.
*/
- saveq->iocb.ulpStatus =
- IOSTAT_LOCAL_REJECT;
- saveq->iocb.un.ulpWord[4] =
- IOERR_SLI_ABORTED;
+ set_job_ulpstatus(saveq,
+ IOSTAT_LOCAL_REJECT);
+ set_job_ulpword4(saveq,
+ IOERR_SLI_ABORTED);
spin_lock_irqsave(
&phba->hbalock, iflag);
- saveq->iocb_flag |=
+ saveq->cmd_flag |=
LPFC_DELAY_MEM_FREE;
spin_unlock_irqrestore(
&phba->hbalock, iflag);
}
}
}
- (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
+ cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
} else
lpfc_sli_release_iocbq(phba, cmdiocbp);
} else {
@@ -3768,16 +3856,12 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
"0322 Ring %d handler: "
"unexpected completion IoTag x%x "
"Data: x%x x%x x%x x%x\n",
- pring->ringno,
- saveq->iocb.ulpIoTag,
- saveq->iocb.ulpStatus,
- saveq->iocb.un.ulpWord[4],
- saveq->iocb.ulpCommand,
- saveq->iocb.ulpContext);
+ pring->ringno, iotag, ulp_status,
+ ulp_word4, ulp_command, ulp_context);
}
}
- return rc;
+ return 1;
}
/**
@@ -3986,18 +4070,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
break;
}
- spin_unlock_irqrestore(&phba->hbalock, iflag);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
- spin_lock_irqsave(&phba->hbalock, iflag);
if (unlikely(!cmdiocbq))
break;
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- if (cmdiocbq->iocb_cmpl) {
+ if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ if (cmdiocbq->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
- &rspiocbq);
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
spin_lock_irqsave(&phba->hbalock, iflag);
}
break;
@@ -4088,155 +4169,159 @@ lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *rspiocbp)
{
struct lpfc_iocbq *saveq;
- struct lpfc_iocbq *cmdiocbp;
+ struct lpfc_iocbq *cmdiocb;
struct lpfc_iocbq *next_iocb;
- IOCB_t *irsp = NULL;
+ IOCB_t *irsp;
uint32_t free_saveq;
- uint8_t iocb_cmd_type;
+ u8 cmd_type;
lpfc_iocb_type type;
unsigned long iflag;
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
+ u32 ulp_word4 = get_job_word4(phba, rspiocbp);
+ u32 ulp_command = get_job_cmnd(phba, rspiocbp);
int rc;
spin_lock_irqsave(&phba->hbalock, iflag);
/* First add the response iocb to the continueq list */
- list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+ list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
pring->iocb_continueq_cnt++;
- /* Now, determine whether the list is completed for processing */
- irsp = &rspiocbp->iocb;
- if (irsp->ulpLe) {
- /*
- * By default, the driver expects to free all resources
- * associated with this iocb completion.
- */
- free_saveq = 1;
- saveq = list_get_first(&pring->iocb_continueq,
- struct lpfc_iocbq, list);
- irsp = &(saveq->iocb);
- list_del_init(&pring->iocb_continueq);
- pring->iocb_continueq_cnt = 0;
+ /*
+ * By default, the driver expects to free all resources
+ * associated with this iocb completion.
+ */
+ free_saveq = 1;
+ saveq = list_get_first(&pring->iocb_continueq,
+ struct lpfc_iocbq, list);
+ list_del_init(&pring->iocb_continueq);
+ pring->iocb_continueq_cnt = 0;
- pring->stats.iocb_rsp++;
+ pring->stats.iocb_rsp++;
- /*
- * If resource errors reported from HBA, reduce
- * queuedepths of the SCSI device.
- */
- if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
- ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
- IOERR_NO_RESOURCES)) {
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- phba->lpfc_rampdown_queue_depth(phba);
- spin_lock_irqsave(&phba->hbalock, iflag);
- }
+ /*
+ * If resource errors reported from HBA, reduce
+ * queuedepths of the SCSI device.
+ */
+ if (ulp_status == IOSTAT_LOCAL_REJECT &&
+ ((ulp_word4 & IOERR_PARAM_MASK) ==
+ IOERR_NO_RESOURCES)) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ phba->lpfc_rampdown_queue_depth(phba);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ }
- if (irsp->ulpStatus) {
- /* Rsp ring <ringno> error: IOCB */
+ if (ulp_status) {
+ /* Rsp ring <ringno> error: IOCB */
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ irsp = &rspiocbp->iocb;
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0328 Rsp Ring %d error: "
+ "0328 Rsp Ring %d error: ulp_status x%x "
+ "IOCB Data: "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x "
+ "x%08x x%08x x%08x x%08x\n",
+ pring->ringno, ulp_status,
+ get_job_ulpword(rspiocbp, 0),
+ get_job_ulpword(rspiocbp, 1),
+ get_job_ulpword(rspiocbp, 2),
+ get_job_ulpword(rspiocbp, 3),
+ get_job_ulpword(rspiocbp, 4),
+ get_job_ulpword(rspiocbp, 5),
+ *(((uint32_t *)irsp) + 6),
+ *(((uint32_t *)irsp) + 7),
+ *(((uint32_t *)irsp) + 8),
+ *(((uint32_t *)irsp) + 9),
+ *(((uint32_t *)irsp) + 10),
+ *(((uint32_t *)irsp) + 11),
+ *(((uint32_t *)irsp) + 12),
+ *(((uint32_t *)irsp) + 13),
+ *(((uint32_t *)irsp) + 14),
+ *(((uint32_t *)irsp) + 15));
+ } else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0321 Rsp Ring %d error: "
"IOCB Data: "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
- "x%x x%x x%x x%x "
"x%x x%x x%x x%x\n",
pring->ringno,
- irsp->un.ulpWord[0],
- irsp->un.ulpWord[1],
- irsp->un.ulpWord[2],
- irsp->un.ulpWord[3],
- irsp->un.ulpWord[4],
- irsp->un.ulpWord[5],
- *(((uint32_t *) irsp) + 6),
- *(((uint32_t *) irsp) + 7),
- *(((uint32_t *) irsp) + 8),
- *(((uint32_t *) irsp) + 9),
- *(((uint32_t *) irsp) + 10),
- *(((uint32_t *) irsp) + 11),
- *(((uint32_t *) irsp) + 12),
- *(((uint32_t *) irsp) + 13),
- *(((uint32_t *) irsp) + 14),
- *(((uint32_t *) irsp) + 15));
+ rspiocbp->wcqe_cmpl.word0,
+ rspiocbp->wcqe_cmpl.total_data_placed,
+ rspiocbp->wcqe_cmpl.parameter,
+ rspiocbp->wcqe_cmpl.word3);
}
+ }
- /*
- * Fetch the IOCB command type and call the correct completion
- * routine. Solicited and Unsolicited IOCBs on the ELS ring
- * get freed back to the lpfc_iocb_list by the discovery
- * kernel thread.
- */
- iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
- type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
- switch (type) {
- case LPFC_SOL_IOCB:
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- break;
-
- case LPFC_UNSOL_IOCB:
- spin_unlock_irqrestore(&phba->hbalock, iflag);
- rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
- spin_lock_irqsave(&phba->hbalock, iflag);
- if (!rc)
- free_saveq = 0;
- break;
- case LPFC_ABORT_IOCB:
- cmdiocbp = NULL;
- if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
+ /*
+ * Fetch the iocb command type and call the correct completion
+ * routine. Solicited and Unsolicited IOCBs on the ELS ring
+ * get freed back to the lpfc_iocb_list by the discovery
+ * kernel thread.
+ */
+ cmd_type = ulp_command & CMD_IOCB_MASK;
+ type = lpfc_sli_iocb_cmd_type(cmd_type);
+ switch (type) {
+ case LPFC_SOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ break;
+ case LPFC_UNSOL_IOCB:
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ if (!rc)
+ free_saveq = 0;
+ break;
+ case LPFC_ABORT_IOCB:
+ cmdiocb = NULL;
+ if (ulp_command != CMD_XRI_ABORTED_CX)
+ cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
+ saveq);
+ if (cmdiocb) {
+ /* Call the specified completion routine */
+ if (cmdiocb->cmd_cmpl) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
- cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
- saveq);
+ cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
spin_lock_irqsave(&phba->hbalock, iflag);
- }
- if (cmdiocbp) {
- /* Call the specified completion routine */
- if (cmdiocbp->iocb_cmpl) {
- spin_unlock_irqrestore(&phba->hbalock,
- iflag);
- (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
- saveq);
- spin_lock_irqsave(&phba->hbalock,
- iflag);
- } else
- __lpfc_sli_release_iocbq(phba,
- cmdiocbp);
- }
- break;
-
- case LPFC_UNKNOWN_IOCB:
- if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
- char adaptermsg[LPFC_MAX_ADPTMSG];
- memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
- memcpy(&adaptermsg[0], (uint8_t *)irsp,
- MAX_MSG_DATA);
- dev_warn(&((phba->pcidev)->dev),
- "lpfc%d: %s\n",
- phba->brd_no, adaptermsg);
} else {
- /* Unknown IOCB command */
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0335 Unknown IOCB "
- "command Data: x%x "
- "x%x x%x x%x\n",
- irsp->ulpCommand,
- irsp->ulpStatus,
- irsp->ulpIoTag,
- irsp->ulpContext);
+ __lpfc_sli_release_iocbq(phba, cmdiocb);
}
- break;
}
+ break;
+ case LPFC_UNKNOWN_IOCB:
+ if (ulp_command == CMD_ADAPTER_MSG) {
+ char adaptermsg[LPFC_MAX_ADPTMSG];
+
+ memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+ memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
+ MAX_MSG_DATA);
+ dev_warn(&((phba->pcidev)->dev),
+ "lpfc%d: %s\n",
+ phba->brd_no, adaptermsg);
+ } else {
+ /* Unknown command */
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0335 Unknown IOCB "
+ "command Data: x%x "
+ "x%x x%x x%x\n",
+ ulp_command,
+ ulp_status,
+ get_wqe_reqtag(rspiocbp),
+ get_job_ulpcontext(phba, rspiocbp));
+ }
+ break;
+ }
- if (free_saveq) {
- list_for_each_entry_safe(rspiocbp, next_iocb,
- &saveq->list, list) {
- list_del_init(&rspiocbp->list);
- __lpfc_sli_release_iocbq(phba, rspiocbp);
- }
- __lpfc_sli_release_iocbq(phba, saveq);
+ if (free_saveq) {
+ list_for_each_entry_safe(rspiocbp, next_iocb,
+ &saveq->list, list) {
+ list_del_init(&rspiocbp->list);
+ __lpfc_sli_release_iocbq(phba, rspiocbp);
}
- rspiocbp = NULL;
+ __lpfc_sli_release_iocbq(phba, saveq);
}
+ rspiocbp = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflag);
return rspiocbp;
}
@@ -4429,8 +4514,8 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
irspiocbq = container_of(cq_event, struct lpfc_iocbq,
cq_event);
/* Translate ELS WCQE to response IOCBQ */
- irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
- irspiocbq);
+ irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
+ irspiocbq);
if (irspiocbq)
lpfc_sli_sp_handle_rspiocb(phba, pring,
irspiocbq);
@@ -4466,42 +4551,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
- LIST_HEAD(completions);
+ LIST_HEAD(tx_completions);
+ LIST_HEAD(txcmplq_completions);
struct lpfc_iocbq *iocb, *next_iocb;
+ int offline;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_fabric_abort_hba(phba);
}
+ offline = pci_channel_offline(phba->pcidev);
/* Error everything on txq and txcmplq
* First do the txq.
*/
if (phba->sli_rev >= LPFC_SLI_REV4) {
spin_lock_irq(&pring->ring_lock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- spin_unlock_irq(&pring->ring_lock);
- spin_lock_irq(&phba->hbalock);
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
- spin_unlock_irq(&phba->hbalock);
+ if (offline) {
+ list_splice_init(&pring->txcmplq,
+ &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
+ spin_unlock_irq(&pring->ring_lock);
} else {
spin_lock_irq(&phba->hbalock);
- list_splice_init(&pring->txq, &completions);
+ list_splice_init(&pring->txq, &tx_completions);
pring->txq_cnt = 0;
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
- lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+ if (offline) {
+ list_splice_init(&pring->txcmplq, &txcmplq_completions);
+ } else {
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list)
+ lpfc_sli_issue_abort_iotag(phba, pring,
+ iocb, NULL);
+ }
spin_unlock_irq(&phba->hbalock);
}
- /* Make sure HBA is alive */
- lpfc_issue_hb_tmo(phba);
+ if (offline) {
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ } else {
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+ }
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+ lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
@@ -4554,11 +4659,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
- if (phba->hba_flag & HBA_IOQ_FLUSH ||
- !phba->sli4_hba.hdwq) {
- spin_unlock_irq(&phba->hbalock);
- return;
- }
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@@ -4573,7 +4673,7 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
list_splice_init(&pring->txq, &txq);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txq_cnt = 0;
@@ -4599,7 +4699,7 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
list_splice_init(&pring->txq, &txq);
list_for_each_entry_safe(piocb, next_iocb,
&pring->txcmplq, list)
- piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+ piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txq_cnt = 0;
@@ -4749,7 +4849,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
- volatile uint32_t mbox;
+ volatile struct MAILBOX_word0 mbox;
uint32_t hc_copy, ha_copy, resp_data;
int i;
uint8_t hdrtype;
@@ -4783,13 +4883,13 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
phba->pport->stopped = 1;
}
- mbox = 0;
- ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+ mbox.word0 = 0;
+ mbox.mbxCommand = MBX_KILL_BOARD;
+ mbox.mbxOwner = OWN_CHIP;
writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
mbox_buf = phba->MBslimaddr;
- writel(mbox, mbox_buf);
+ writel(mbox.word0, mbox_buf);
for (i = 0; i < 50; i++) {
if (lpfc_readl((resp_buf + 1), &resp_data))
@@ -4810,12 +4910,12 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
goto clear_errat;
}
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+ mbox.mbxOwner = OWN_HOST;
resp_data = 0;
for (i = 0; i < 500; i++) {
if (lpfc_readl(resp_buf, &resp_data))
return;
- if (resp_data != mbox)
+ if (resp_data != mbox.word0)
mdelay(1);
else
break;
@@ -5046,12 +5146,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
- /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
- if (phba->hba_flag & HBA_FW_DUMP_OP) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return rc;
- }
-
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@@ -5091,9 +5185,8 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
- MAILBOX_t *mb;
+ volatile struct MAILBOX_word0 mb;
struct lpfc_sli *psli;
- volatile uint32_t word0;
void __iomem *to_slim;
uint32_t hba_aer_enabled;
@@ -5110,24 +5203,23 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
(phba->pport) ? phba->pport->port_state : 0,
psli->sli_flag);
- word0 = 0;
- mb = (MAILBOX_t *) &word0;
- mb->mbxCommand = MBX_RESTART;
- mb->mbxHc = 1;
+ mb.word0 = 0;
+ mb.mbxCommand = MBX_RESTART;
+ mb.mbxHc = 1;
lpfc_reset_barrier(phba);
to_slim = phba->MBslimaddr;
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
if (phba->pport && phba->pport->port_state)
- word0 = 1; /* This is really setting up word1 */
+ mb.word0 = 1; /* This is really setting up word1 */
else
- word0 = 0; /* This is really setting up word1 */
+ mb.word0 = 0; /* This is really setting up word1 */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);
@@ -5186,6 +5278,8 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
+ /* Preserve FA-PWWN expectation */
+ phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -5762,26 +5856,20 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
mboxq->mcqe.trailer);
if (rc) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = -EIO;
goto out_free_mboxq;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_RGN23_SIZE) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = -EIO;
goto out_free_mboxq;
}
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
rc = 0;
out_free_mboxq:
- mempool_free(mboxq, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
return rc;
}
@@ -5980,6 +6068,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
/* obtain link type and link number via READ_CONFIG */
phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
lpfc_sli4_read_config(phba);
+
+ if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
+ phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
+
if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
goto retrieve_ppname;
@@ -6123,6 +6215,9 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
LPFC_MBOXQ_t *mbox;
+ *extnt_count = 0;
+ *extnt_size = 0;
+
mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -6738,8 +6833,13 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
phba->sli4_hba.pc_sli4_params.mi_ver);
break;
+ case LPFC_SET_LD_SIGNAL:
+ mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
+ mbox->u.mqe.un.set_feature.param_len = 16;
+ bf_set(lpfc_mbx_set_feature_lds_qry,
+ &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
+ break;
case LPFC_SET_ENABLE_CMF:
- bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
mbox->u.mqe.un.set_feature.param_len = 4;
bf_set(lpfc_mbx_set_feature_cmf,
@@ -7735,6 +7835,62 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
}
static void
+lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ union lpfc_sli4_cfg_shdr *shdr;
+ u32 shdr_status, shdr_add_status;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
+ "4622 SET_FEATURE (x%x) mbox failed, "
+ "status x%x add_status x%x, mbx status x%x\n",
+ LPFC_SET_LD_SIGNAL, shdr_status,
+ shdr_add_status, pmb->u.mb.mbxStatus);
+ phba->degrade_activate_threshold = 0;
+ phba->degrade_deactivate_threshold = 0;
+ phba->fec_degrade_interval = 0;
+ goto out;
+ }
+
+ phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
+ phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
+ phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
+ "4624 Success: da x%x dd x%x interval x%x\n",
+ phba->degrade_activate_threshold,
+ phba->degrade_deactivate_threshold,
+ phba->fec_degrade_interval);
+out:
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+int
+lpfc_read_lds_params(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
+ mboxq->vport = phba->pport;
+ mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+ return 0;
+}
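Editor's note: lpfc_read_lds_params above follows a common asynchronous-command ownership contract — the mailbox is freed by the completion handler (lpfc_mbx_cmpl_read_lds_params) once the command finishes, and by the submitter only when lpfc_sli_issue_mbox itself fails. A minimal userspace sketch of that contract follows; the struct, the submit_nowait helper, and all names are invented for illustration, only the ownership rule mirrors the driver code.

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct cmd {
		void (*cmpl)(struct cmd *c);	/* completion handler owns the cmd */
	};

	/* Hypothetical submit helper: pretend the device completes at once. */
	static bool submit_nowait(struct cmd *c)
	{
		c->cmpl(c);
		return true;
	}

	static void read_params_cmpl(struct cmd *c)
	{
		/* ... parse the response here ... */
		free(c);		/* success path: completion frees */
	}

	static int read_params(void)
	{
		struct cmd *c = malloc(sizeof(*c));

		if (!c)
			return -ENOMEM;
		c->cmpl = read_params_cmpl;
		if (!submit_nowait(c)) {
			free(c);	/* submit failed: caller still owns c */
			return -EIO;
		}
		return 0;		/* queued: ownership moved to cmpl */
	}

	int main(void)
	{
		printf("read_params() = %d\n", read_params());
		return 0;
	}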
+
+static void
lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_vport *vport = pmb->vport;
@@ -7881,6 +8037,172 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
}
/**
+ * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entries: Number of rx_info_entry objects to allocate in ring
+ *
+ * Return:
+ * 0 - Success
+ * -ENOMEM - Failure to kmalloc ring buffer
+ **/
+int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
+ u32 entries)
+{
+ rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
+ GFP_KERNEL);
+ if (!rx_monitor->ring)
+ return -ENOMEM;
+
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_lock_init(&rx_monitor->lock);
+ rx_monitor->entries = entries;
+
+ return 0;
+}
+
+/**
+ * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ **/
+void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
+{
+ spin_lock(&rx_monitor->lock);
+ kfree(rx_monitor->ring);
+ rx_monitor->ring = NULL;
+ rx_monitor->entries = 0;
+ rx_monitor->head_idx = 0;
+ rx_monitor->tail_idx = 0;
+ spin_unlock(&rx_monitor->lock);
+}
+
+/**
+ * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @entry: Pointer to rx_info_entry
+ *
+ * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
+ * deep copy of the rx_info_entry, not a shallow copy of the rx_info_entry ptr.
+ *
+ * This is called from lpfc_cmf_timer, which runs in timer/softirq context.
+ *
+ * If the ring overflows, the oldest entry is overwritten to preserve FIFO
+ * order on a best-effort basis.
+ **/
+void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
+ struct rx_info_entry *entry)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+
+ spin_lock(ring_lock);
+ memcpy(&ring[*tail_idx], entry, sizeof(*entry));
+ *tail_idx = (*tail_idx + 1) % ring_size;
+
+ /* Best effort to preserve FIFO order of saved data */
+ if (*tail_idx == *head_idx)
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ spin_unlock(ring_lock);
+}
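Editor's note: for readers unfamiliar with the drop-oldest ring used by lpfc_rx_monitor_record, here is a minimal userspace sketch of the same head/tail arithmetic. It is an illustration only — the entry type, the ring size, and the pthread mutex standing in for the driver's spinlock are assumptions, not lpfc code.

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	#define RING_ENTRIES 4	/* tiny so the overwrite is easy to see */

	struct demo_entry {
		unsigned long long total_bytes;	/* stand-in for rx_info_entry */
	};

	struct demo_ring {
		struct demo_entry ring[RING_ENTRIES];
		unsigned int head_idx;		/* oldest valid entry */
		unsigned int tail_idx;		/* next slot to write */
		pthread_mutex_t lock;		/* stand-in for the spinlock */
	};

	/* Deep-copy @entry into the ring; drop the oldest entry on overflow. */
	static void demo_record(struct demo_ring *r, const struct demo_entry *entry)
	{
		pthread_mutex_lock(&r->lock);
		memcpy(&r->ring[r->tail_idx], entry, sizeof(*entry));
		r->tail_idx = (r->tail_idx + 1) % RING_ENTRIES;

		/* Tail caught head: advance head, sacrificing the oldest entry. */
		if (r->tail_idx == r->head_idx)
			r->head_idx = (r->head_idx + 1) % RING_ENTRIES;
		pthread_mutex_unlock(&r->lock);
	}

	int main(void)
	{
		struct demo_ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

		for (unsigned long long i = 1; i <= 6; i++) {
			struct demo_entry e = { .total_bytes = i };
			demo_record(&r, &e);
		}
		/* Entries 1-3 were overwritten; prints 4, 5, 6. */
		while (r.head_idx != r.tail_idx) {
			printf("%llu\n", r.ring[r.head_idx].total_bytes);
			r.head_idx = (r.head_idx + 1) % RING_ENTRIES;
		}
		return 0;
	}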
+
+/**
+ * lpfc_rx_monitor_report - Read out rx_monitor's ring
+ * @phba: Pointer to lpfc_hba object
+ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
+ * @buf: Pointer to char buffer that will contain rx monitor info data
+ * @buf_len: Length of buf, including the terminating NUL
+ * @max_read_entries: Maximum number of entries to read out of ring
+ *
+ * Used to dump/read what's in rx_monitor's ring buffer.
+ *
+ * If buf is NULL || buf_len == 0, then it is implied that we want to log the
+ * information to kmsg instead of filling out buf.
+ *
+ * Return:
+ * Number of entries read out of the ring
+ **/
+u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
+ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
+ u32 buf_len, u32 max_read_entries)
+{
+ struct rx_info_entry *ring = rx_monitor->ring;
+ struct rx_info_entry *entry;
+ u32 *head_idx = &rx_monitor->head_idx;
+ u32 *tail_idx = &rx_monitor->tail_idx;
+ spinlock_t *ring_lock = &rx_monitor->lock;
+ u32 ring_size = rx_monitor->entries;
+ u32 cnt = 0;
+ char tmp[DBG_LOG_STR_SZ] = {0};
+ bool log_to_kmsg = !buf || !buf_len;
+
+ if (!log_to_kmsg) {
+ /* clear the buffer to be sure */
+ memset(buf, 0, buf_len);
+
+ scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
+ "%-8s%-8s%-8s%-16s\n",
+ "MaxBPI", "Tot_Data_CMF",
+ "Tot_Data_Cmd", "Tot_Data_Cmpl",
+ "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
+ "IO_cnt", "Info", "BWutil(ms)");
+ }
+
+ /* Needs to be _bh because record is called from timer interrupt
+ * context
+ */
+ spin_lock_bh(ring_lock);
+ while (*head_idx != *tail_idx) {
+ entry = &ring[*head_idx];
+
+ /* Read out this entry's data. */
+ if (!log_to_kmsg) {
+ /* If !log_to_kmsg, then store to buf. */
+ scnprintf(tmp, sizeof(tmp),
+ "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
+ "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
+ *head_idx, entry->max_bytes_per_interval,
+ entry->cmf_bytes, entry->total_bytes,
+ entry->rcv_bytes, entry->avg_io_latency,
+ entry->avg_io_size, entry->max_read_cnt,
+ entry->cmf_busy, entry->io_cnt,
+ entry->cmf_info, entry->timer_utilization,
+ entry->timer_interval);
+
+ /* Check for buffer overflow */
+ if ((strlen(buf) + strlen(tmp)) >= buf_len)
+ break;
+
+ /* Append entry's data to buffer */
+ strlcat(buf, tmp, buf_len);
+ } else {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+ "4410 %02u: MBPI %llu Xmit %llu "
+ "Cmpl %llu Lat %llu ASz %llu Info %02u "
+ "BWUtil %u Int %u slot %u\n",
+ cnt, entry->max_bytes_per_interval,
+ entry->total_bytes, entry->rcv_bytes,
+ entry->avg_io_latency,
+ entry->avg_io_size, entry->cmf_info,
+ entry->timer_utilization,
+ entry->timer_interval, *head_idx);
+ }
+
+ *head_idx = (*head_idx + 1) % ring_size;
+
+ /* Don't read out more than max_read_entries */
+ cnt++;
+ if (cnt >= max_read_entries)
+ break;
+ }
+ spin_unlock_bh(ring_lock);
+
+ return cnt;
+}
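Editor's note: the report path drains from head to tail, stopping at max_read_entries or when appending another formatted entry would overflow the caller's buffer. A userspace analogue of that drain loop, reusing demo_ring/demo_record from the sketch above (again an assumption-laden illustration, not driver code; the caller is expected to pass a zeroed buf):

	static unsigned int demo_report(struct demo_ring *r, char *buf,
					size_t buf_len, unsigned int max_read)
	{
		unsigned int cnt = 0;
		char tmp[64];

		pthread_mutex_lock(&r->lock);
		while (r->head_idx != r->tail_idx && cnt < max_read) {
			snprintf(tmp, sizeof(tmp), "%llu\n",
				 r->ring[r->head_idx].total_bytes);
			/* Stop rather than truncate if buf would overflow. */
			if (strlen(buf) + strlen(tmp) >= buf_len)
				break;
			strcat(buf, tmp);
			r->head_idx = (r->head_idx + 1) % RING_ENTRIES;
			cnt++;
		}
		pthread_mutex_unlock(&r->lock);
		return cnt;
	}

The design trade-off in both the driver and the sketch is bounded memory: recording never blocks or allocates, at the cost of silently dropping the oldest samples when the reader falls behind.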
+
+/**
* lpfc_cmf_setup - Initialize idle_stat tracking
* @phba: Pointer to HBA context object.
*
@@ -7905,10 +8227,6 @@ lpfc_cmf_setup(struct lpfc_hba *phba)
sli4_params = &phba->sli4_hba.pc_sli4_params;
- /* Are we forcing MI off via module parameter? */
- if (!phba->cfg_enable_mi)
- sli4_params->mi_ver = 0;
-
/* Always try to enable MI feature if we can */
if (sli4_params->mi_ver) {
lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
@@ -8058,19 +8376,29 @@ no_cmf:
phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
/* Allocate RX Monitor Buffer */
- if (!phba->rxtable) {
- phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
- sizeof(struct rxtable_entry),
- GFP_KERNEL);
- if (!phba->rxtable) {
+ if (!phba->rx_monitor) {
+ phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
+ GFP_KERNEL);
+
+ if (!phba->rx_monitor) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2644 Failed to alloc memory "
"for RX Monitor Buffer\n");
return -ENOMEM;
}
+
+ /* Instruct the rx_monitor object to instantiate its ring */
+ if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
+ LPFC_MAX_RXMONITOR_ENTRY)) {
+ kfree(phba->rx_monitor);
+ phba->rx_monitor = NULL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2645 Failed to alloc memory "
+ "for RX Monitor's Ring\n");
+ return -ENOMEM;
+ }
}
- atomic_set(&phba->rxtable_idx_head, 0);
- atomic_set(&phba->rxtable_idx_tail, 0);
+
return 0;
}
@@ -8454,8 +8782,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
/*
- * This memory was allocated by the lpfc_read_sparam routine. Release
- * it to the mbuf pool.
+ * This memory was allocated by the lpfc_read_sparam routine but is
+ * no longer needed. It is released and ctx_buf NULLed to prevent
+ * unintended pointer access as the mbox is reused.
*/
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
@@ -8775,6 +9104,9 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
mempool_free(mboxq, phba->mbox_mem_pool);
+ /* Enable RAS FW log support */
+ lpfc_sli4_ras_setup(phba);
+
phba->hba_flag |= HBA_SETUP;
return rc;
@@ -10106,7 +10438,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
lockdep_assert_held(&phba->hbalock);
- if (piocb->iocb_cmpl && (!piocb->vport) &&
+ if (piocb->cmd_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -10144,24 +10476,14 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
- case CMD_GEN_REQUEST64_CR:
- case CMD_GEN_REQUEST64_CX:
- if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
- FC_RCTL_DD_UNSOL_CMD) ||
- (piocb->iocb.un.genreq64.w5.hcsw.Type !=
- MENLO_TRANSPORT_TYPE))
-
- goto iocb_busy;
- break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
* For IOCBs, like QUE_RING_BUF, that have no rsp ring
- * completion, iocb_cmpl MUST be 0.
+ * completion, cmd_cmpl MUST be 0.
*/
- if (piocb->iocb_cmpl)
- piocb->iocb_cmpl = NULL;
+ if (piocb->cmd_cmpl)
+ piocb->cmd_cmpl = NULL;
fallthrough;
case CMD_CREATE_XRI_CR:
case CMD_CLOSE_XRI_CN:
@@ -10208,715 +10530,6 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
}
/**
- * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
- * @phba: Pointer to HBA context object.
- * @piocbq: Pointer to command iocb.
- * @sglq: Pointer to the scatter gather queue object.
- *
- * This routine converts the bpl or bde that is in the IOCB
- * to a sgl list for the sli4 hardware. The physical address
- * of the bpl/bde is converted back to a virtual address.
- * If the IOCB contains a BPL then the list of BDE's is
- * converted to sli4_sge's. If the IOCB contains a single
- * BDE then it is converted to a single sli_sge.
- * The IOCB is still in cpu endianess so the contents of
- * the bpl can be used without byte swapping.
- *
- * Returns valid XRI = Success, NO_XRI = Failure.
-**/
-static uint16_t
-lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
- struct lpfc_sglq *sglq)
-{
- uint16_t xritag = NO_XRI;
- struct ulp_bde64 *bpl = NULL;
- struct ulp_bde64 bde;
- struct sli4_sge *sgl = NULL;
- struct lpfc_dmabuf *dmabuf;
- IOCB_t *icmd;
- int numBdes = 0;
- int i = 0;
- uint32_t offset = 0; /* accumulated offset in the sg request list */
- int inbound = 0; /* number of sg reply entries inbound from firmware */
-
- if (!piocbq || !sglq)
- return xritag;
-
- sgl = (struct sli4_sge *)sglq->sgl;
- icmd = &piocbq->iocb;
- if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
- return sglq->sli4_xritag;
- if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
- numBdes = icmd->un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- /* The addrHigh and addrLow fields within the IOCB
- * have not been byteswapped yet so there is no
- * need to swap them back.
- */
- if (piocbq->context3)
- dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
- else
- return xritag;
-
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- if (!bpl)
- return xritag;
-
- for (i = 0; i < numBdes; i++) {
- /* Should already be byte swapped. */
- sgl->addr_hi = bpl->addrHigh;
- sgl->addr_lo = bpl->addrLow;
-
- sgl->word2 = le32_to_cpu(sgl->word2);
- if ((i+1) == numBdes)
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- else
- bf_set(lpfc_sli4_sge_last, sgl, 0);
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- bde.tus.w = le32_to_cpu(bpl->tus.w);
- sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
- /* The offsets in the sgl need to be accumulated
- * separately for the request and reply lists.
- * The request is always first, the reply follows.
- */
- if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
- /* add up the reply sg entries */
- if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
- inbound++;
- /* first inbound? reset the offset */
- if (inbound == 1)
- offset = 0;
- bf_set(lpfc_sli4_sge_offset, sgl, offset);
- bf_set(lpfc_sli4_sge_type, sgl,
- LPFC_SGE_TYPE_DATA);
- offset += bde.tus.f.bdeSize;
- }
- sgl->word2 = cpu_to_le32(sgl->word2);
- bpl++;
- sgl++;
- }
- } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
- /* The addrHigh and addrLow fields of the BDE have not
- * been byteswapped yet so they need to be swapped
- * before putting them in the sgl.
- */
- sgl->addr_hi =
- cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
- sgl->addr_lo =
- cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
- sgl->word2 = le32_to_cpu(sgl->word2);
- bf_set(lpfc_sli4_sge_last, sgl, 1);
- sgl->word2 = cpu_to_le32(sgl->word2);
- sgl->sge_len =
- cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
- }
- return sglq->sli4_xritag;
-}
-
-/**
- * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
- * @phba: Pointer to HBA context object.
- * @iocbq: Pointer to command iocb.
- * @wqe: Pointer to the work queue entry.
- *
- * This routine converts the iocb command to its Work Queue Entry
- * equivalent. The wqe pointer should not have any fields set when
- * this routine is called because it will memcpy over them.
- * This routine does not set the CQ_ID or the WQEC bits in the
- * wqe.
- *
- * Returns: 0 = Success, IOCB_ERROR = Failure.
- **/
-static int
-lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
- union lpfc_wqe128 *wqe)
-{
- uint32_t xmit_len = 0, total_len = 0;
- uint8_t ct = 0;
- uint32_t fip;
- uint32_t abort_tag;
- uint8_t command_type = ELS_COMMAND_NON_FIP;
- uint8_t cmnd;
- uint16_t xritag;
- uint16_t abrt_iotag;
- struct lpfc_iocbq *abrtiocbq;
- struct ulp_bde64 *bpl = NULL;
- uint32_t els_id = LPFC_ELS_ID_DEFAULT;
- int numBdes, i;
- struct ulp_bde64 bde;
- struct lpfc_nodelist *ndlp;
- uint32_t *pcmd;
- uint32_t if_type;
-
- fip = phba->hba_flag & HBA_FIP_SUPPORT;
- /* The fcp commands will set command type */
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- command_type = FCP_COMMAND;
- else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
- command_type = ELS_COMMAND_FIP;
- else
- command_type = ELS_COMMAND_NON_FIP;
-
- if (phba->fcp_embed_io)
- memset(wqe, 0, sizeof(union lpfc_wqe128));
- /* Some of the fields are in the right position already */
- memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- /* The ct field has moved so reset */
- wqe->generic.wqe_com.word7 = 0;
- wqe->generic.wqe_com.word10 = 0;
-
- abort_tag = (uint32_t) iocbq->iotag;
- xritag = iocbq->sli4_xritag;
- /* words0-2 bpl convert bde */
- if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
- numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- bpl = (struct ulp_bde64 *)
- ((struct lpfc_dmabuf *)iocbq->context3)->virt;
- if (!bpl)
- return IOCB_ERROR;
-
- /* Should already be byte swapped. */
- wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
- wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
- /* swap the size field back to the cpu so we
- * can assign it to the sgl.
- */
- wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
- xmit_len = wqe->generic.bde.tus.f.bdeSize;
- total_len = 0;
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- total_len += bde.tus.f.bdeSize;
- }
- } else
- xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
-
- iocbq->iocb.ulpIoTag = iocbq->iotag;
- cmnd = iocbq->iocb.ulpCommand;
-
- switch (iocbq->iocb.ulpCommand) {
- case CMD_ELS_REQUEST64_CR:
- if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
- ndlp = iocbq->context_un.ndlp;
- else
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- if (!iocbq->iocb.ulpLe) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2007 Only Limited Edition cmd Format"
- " supported 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
-
- wqe->els_req.payload_len = xmit_len;
- /* Els_reguest64 has a TMO */
- bf_set(wqe_tmo, &wqe->els_req.wqe_com,
- iocbq->iocb.ulpTimeout);
- /* Need a VF for word 4 set the vf bit*/
- bf_set(els_req64_vf, &wqe->els_req, 0);
- /* And a VFID for word 12 */
- bf_set(els_req64_vfid, &wqe->els_req, 0);
- ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- iocbq->iocb.ulpContext);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
- bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
- /* CCP CCPE PV PRI in word10 were set in the memcpy */
- if (command_type == ELS_COMMAND_FIP)
- els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
- >> LPFC_FIP_ELS_ID_SHIFT);
- pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
- iocbq->context2)->virt);
- if_type = bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf);
- if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
- *pcmd == ELS_CMD_SCR ||
- *pcmd == ELS_CMD_RDF ||
- *pcmd == ELS_CMD_EDC ||
- *pcmd == ELS_CMD_RSCN_XMT ||
- *pcmd == ELS_CMD_FDISC ||
- *pcmd == ELS_CMD_LOGO ||
- *pcmd == ELS_CMD_QFPA ||
- *pcmd == ELS_CMD_UVEM ||
- *pcmd == ELS_CMD_PLOGI)) {
- bf_set(els_req64_sp, &wqe->els_req, 1);
- bf_set(els_req64_sid, &wqe->els_req,
- iocbq->vport->fc_myDID);
- if ((*pcmd == ELS_CMD_FLOGI) &&
- !(phba->fc_topology ==
- LPFC_TOPOLOGY_LOOP))
- bf_set(els_req64_sid, &wqe->els_req, 0);
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->vpi_ids[iocbq->vport->vpi]);
- } else if (pcmd && iocbq->context1) {
- bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
- bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- }
- }
- bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
- bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
- wqe->els_req.max_response_payload_len = total_len - xmit_len;
- break;
- case CMD_XMIT_SEQUENCE64_CX:
- bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.un.ulpWord[3]);
- bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.unsli3.rcvsli3.ox_id);
- /* The entire sequence is transmitted for this IOCB */
- xmit_len = total_len;
- cmnd = CMD_XMIT_SEQUENCE64_CR;
- if (phba->link_flag & LS_LOOPBACK_MODE)
- bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
- fallthrough;
- case CMD_XMIT_SEQUENCE64_CR:
- /* word3 iocb=io_tag32 wqe=reserved */
- wqe->xmit_sequence.rsvd3 = 0;
- /* word4 relative_offset memcpy */
- /* word5 r_ctl/df_ctl memcpy */
- bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
- bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
- LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
- LPFC_WQE_LENLOC_WORD12);
- bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
- wqe->xmit_sequence.xmit_len = xmit_len;
- command_type = OTHER_COMMAND;
- break;
- case CMD_XMIT_BCAST64_CN:
- /* word3 iocb=iotag32 wqe=seq_payload_len */
- wqe->xmit_bcast64.seq_payload_len = xmit_len;
- /* word4 iocb=rsvd wqe=rsvd */
- /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
- /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
- bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
- LPFC_WQE_LENLOC_WORD3);
- bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
- break;
- case CMD_FCP_IWRITE64_CR:
- command_type = FCP_COMMAND_DATA_OUT;
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_iwrite,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_iwrite,
- 0);
- /* word4 iocb=parameter wqe=total_xfer_length memcpy */
- /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
- bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
- /* Always open the exchange */
- bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
- LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
- else
- bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_FCP_IREAD64_CR:
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_iread,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_iread,
- 0);
- /* word4 iocb=parameter wqe=total_xfer_length memcpy */
- /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
- bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
- /* Always open the exchange */
- bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
- LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
- if (phba->cfg_enable_pbde)
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
- else
- bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_FCP_ICMND64_CR:
- /* word3 iocb=iotag wqe=payload_offset_len */
- /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
- bf_set(payload_offset_len, &wqe->fcp_icmd,
- xmit_len + sizeof(struct fcp_rsp));
- bf_set(cmd_buff_len, &wqe->fcp_icmd,
- 0);
- /* word3 iocb=IO_TAG wqe=reserved */
- bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
- /* Always open the exchange */
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
- iocbq->iocb.ulpFCP2Rcvy);
- if (iocbq->iocb_flag & LPFC_IO_OAS) {
- bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
- if (iocbq->priority) {
- bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
- (iocbq->priority << 1));
- } else {
- bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
- (phba->cfg_XLanePriority << 1));
- }
- }
- /* Note, word 10 is already initialized to 0 */
-
- if (phba->fcp_embed_io) {
- struct lpfc_io_buf *lpfc_cmd;
- struct sli4_sge *sgl;
- struct fcp_cmnd *fcp_cmnd;
- uint32_t *ptr;
-
- /* 128 byte wqe support here */
-
- lpfc_cmd = iocbq->context1;
- sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
- fcp_cmnd = lpfc_cmd->fcp_cmnd;
-
- /* Word 0-2 - FCP_CMND */
- wqe->generic.bde.tus.f.bdeFlags =
- BUFF_TYPE_BDE_IMMED;
- wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
- wqe->generic.bde.addrHigh = 0;
- wqe->generic.bde.addrLow = 88; /* Word 22 */
-
- bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
-
- /* Word 22-29 FCP CMND Payload */
- ptr = &wqe->words[22];
- memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
- }
- break;
- case CMD_GEN_REQUEST64_CR:
- /* For this command calculate the xmit length of the
- * request bde.
- */
- xmit_len = 0;
- numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
- sizeof(struct ulp_bde64);
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- break;
- xmit_len += bde.tus.f.bdeSize;
- }
- /* word3 iocb=IO_TAG wqe=request_payload_len */
- wqe->gen_req.request_payload_len = xmit_len;
- /* word4 iocb=parameter wqe=relative_offset memcpy */
- /* word5 [rctl, type, df_ctl, la] copied in memcpy */
- /* word6 context tag copied in memcpy */
- if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
- ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2015 Invalid CT %x command 0x%x\n",
- ct, iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
- bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
- bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
- bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
- bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
- wqe->gen_req.max_response_payload_len = total_len - xmit_len;
- command_type = OTHER_COMMAND;
- break;
- case CMD_XMIT_ELS_RSP64_CX:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- /* words0-2 BDE memcpy */
- /* word3 iocb=iotag32 wqe=response_payload_len */
- wqe->xmit_els_rsp.response_payload_len = xmit_len;
- /* word4 */
- wqe->xmit_els_rsp.word4 = 0;
- /* word5 iocb=rsvd wge=did */
- bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
- iocbq->iocb.un.xseq64.xmit_els_remoteID);
-
- if_type = bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf);
- if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
- if (iocbq->vport->fc_flag & FC_PT2PT) {
- bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
- bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
- iocbq->vport->fc_myDID);
- if (iocbq->vport->fc_myDID == Fabric_DID) {
- bf_set(wqe_els_did,
- &wqe->xmit_els_rsp.wqe_dest, 0);
- }
- }
- }
- bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
- bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
- iocbq->iocb.unsli3.rcvsli3.ox_id);
- if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
- bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- phba->vpi_ids[iocbq->vport->vpi]);
- bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
- bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
- LPFC_WQE_LENLOC_WORD3);
- bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
- bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
- phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
- if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
- bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
- bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
- iocbq->vport->fc_myDID);
- bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
- }
- command_type = OTHER_COMMAND;
- break;
- case CMD_CLOSE_XRI_CN:
- case CMD_ABORT_XRI_CN:
- case CMD_ABORT_XRI_CX:
- /* words 0-2 memcpy should be 0 rserved */
- /* port will send abts */
- abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
- if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
- abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
- fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
- } else
- fip = 0;
-
- if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
- /*
- * The link is down, or the command was ELS_FIP
- * so the fw does not need to send abts
- * on the wire.
- */
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
- else
- bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
- bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
- /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
- wqe->abort_cmd.rsrvd5 = 0;
- bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
- ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
- abort_tag = iocbq->iocb.un.acxri.abortIoTag;
- /*
- * The abort handler will send us CMD_ABORT_XRI_CN or
- * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
- */
- bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
- bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- cmnd = CMD_ABORT_XRI_CX;
- command_type = OTHER_COMMAND;
- xritag = 0;
- break;
- case CMD_XMIT_BLS_RSP64_CX:
- ndlp = (struct lpfc_nodelist *)iocbq->context1;
- /* As BLS ABTS RSP WQE is very different from other WQEs,
- * we re-construct this WQE here based on information in
- * iocbq from scratch.
- */
- memset(wqe, 0, sizeof(*wqe));
- /* OX_ID is invariable to who sent ABTS to CT exchange */
- bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
- if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
- LPFC_ABTS_UNSOL_INT) {
- /* ABTS sent by initiator to CT exchange, the
- * RX_ID field will be filled with the newly
- * allocated responder XRI.
- */
- bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- iocbq->sli4_xritag);
- } else {
- /* ABTS sent by responder to CT exchange, the
- * RX_ID field will be filled with the responder
- * RX_ID from ABTS.
- */
- bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
- bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
- }
- bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
- bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
-
- /* Use CT=VPI */
- bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
- ndlp->nlp_DID);
- bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
- iocbq->iocb.ulpContext);
- bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
- bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
- phba->vpi_ids[phba->pport->vpi]);
- bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
- bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
- LPFC_WQE_LENLOC_NONE);
- /* Overwrite the pre-set comnd type with OTHER_COMMAND */
- command_type = OTHER_COMMAND;
- if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
- bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
- bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
- bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
- bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
- bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
- bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
- }
-
- break;
- case CMD_SEND_FRAME:
- bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
- bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
- bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
- bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
- bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
- bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
- bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
- bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
- bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
- return 0;
- case CMD_XRI_ABORTED_CX:
- case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
- case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
- case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
- case CMD_FCP_TRSP64_CX: /* Target mode rcv */
- case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "2014 Invalid command 0x%x\n",
- iocbq->iocb.ulpCommand);
- return IOCB_ERROR;
- }
-
- if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
- else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
- else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
- bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
- iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
- LPFC_IO_DIF_INSERT);
- bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
- bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
- wqe->generic.wqe_com.abort_tag = abort_tag;
- bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
- bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
- bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
- bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
- return 0;
-}
-
-/**
* __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
* @phba: Pointer to HBA context object.
* @ring_number: SLI ring number to issue wqe on.
@@ -10924,7 +10537,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
* @flag: Flag indicating if this command can be put into txq.
*
* __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
- * send an iocb command to an HBA with SLI-4 interface spec.
+ * send an iocb command to an HBA with SLI-3 interface spec.
*
* This function takes the hbalock before invoking the lockless version.
* The function will return success after it successfully submit the wqe to
@@ -10962,10 +10575,17 @@ static int
__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
- int rc;
- struct lpfc_io_buf *lpfc_cmd =
- (struct lpfc_io_buf *)piocb->context1;
- union lpfc_wqe128 *wqe = &piocb->wqe;
+ struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
+
+ lpfc_prep_embed_io(phba, lpfc_cmd);
+ return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
+}
+
+void
+lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+{
+ struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
+ union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
struct sli4_sge *sgl;
/* 128 byte wqe support here */
@@ -11003,19 +10623,17 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
/* add the VMID tags as per switch response */
- if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
- if (phba->pport->vmid_priority_tagging) {
+ if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
+ if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
(piocb->vmid_tag.cs_ctl_vmid));
- } else {
+ } else if (phba->cfg_vmid_app_header) {
bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
wqe->words[31] = piocb->vmid_tag.app_id;
}
}
- rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
- return rc;
}
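
/*
 * Illustration: nearly every line of the new WQE prep code funnels through
 * the driver's bf_set()/bf_get() shift-and-mask accessors.  A standalone
 * sketch of that pattern with an invented demo field (the real macros and
 * the *_SHIFT/*_MASK/*_WORD constants are declared in lpfc_hw4.h):
 */
#include <stdint.h>
#include <stdio.h>

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* Hypothetical 8-bit command field living in bits 31:24 of word7. */
#define demo_cmnd_SHIFT	24
#define demo_cmnd_MASK	0x000000ffu
#define demo_cmnd_WORD	word7

struct demo_wqe {
	uint32_t word7;
};

int main(void)
{
	struct demo_wqe wqe = { .word7 = 0xdeadbeefu };

	bf_set(demo_cmnd, &wqe, 0x8a);		/* touches only bits 31:24 */
	printf("word7=0x%08x cmnd=0x%02x\n",
	       wqe.word7, bf_get(demo_cmnd, &wqe));
	return 0;
}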
/**
@@ -11037,13 +10655,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sglq *sglq;
- union lpfc_wqe128 wqe;
+ union lpfc_wqe128 *wqe;
struct lpfc_queue *wq;
struct lpfc_sli_ring *pring;
+ u32 ulp_command = get_job_cmnd(phba, piocb);
/* Get the WQ */
- if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if ((piocb->cmd_flag & LPFC_IO_FCP) ||
+ (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
} else {
wq = phba->sli4_hba.els_wq;
@@ -11057,34 +10676,24 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
*/
lockdep_assert_held(&pring->ring_lock);
-
+ wqe = &piocb->wqe;
if (piocb->sli4_xritag == NO_XRI) {
- if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
- piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+ if (ulp_command == CMD_ABORT_XRI_CX)
sglq = NULL;
else {
- if (!list_empty(&pring->txq)) {
+ sglq = __lpfc_sli_get_els_sglq(phba, piocb);
+ if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
- pring, piocb);
+ pring,
+ piocb);
return IOCB_SUCCESS;
} else {
return IOCB_BUSY;
}
- } else {
- sglq = __lpfc_sli_get_els_sglq(phba, piocb);
- if (!sglq) {
- if (!(flag & SLI_IOCB_RET_IOCB)) {
- __lpfc_sli_ringtx_put(phba,
- pring,
- piocb);
- return IOCB_SUCCESS;
- } else
- return IOCB_BUSY;
- }
}
}
- } else if (piocb->iocb_flag & LPFC_IO_FCP) {
+ } else if (piocb->cmd_flag & LPFC_IO_FCP) {
/* These IO's already have an XRI and a mapped sgl. */
sglq = NULL;
}
@@ -11101,15 +10710,26 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (sglq) {
piocb->sli4_lxritag = sglq->sli4_lxritag;
piocb->sli4_xritag = sglq->sli4_xritag;
- if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
+
+ /* ABTS sent by initiator to CT exchange, the
+ * RX_ID field will be filled with the newly
+ * allocated responder XRI.
+ */
+ if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
+ piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
+ bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
+ piocb->sli4_xritag);
+
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
+ piocb->sli4_xritag);
+
+ if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
return IOCB_ERROR;
}
- if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+ if (lpfc_sli4_wq_put(wq, wqe))
return IOCB_ERROR;
- if (lpfc_sli4_wq_put(wq, &wqe))
- return IOCB_ERROR;
lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
return 0;
@@ -11152,6 +10772,411 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
+static void
+__lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ IOCB_t *cmd;
+
+ cmd = &cmdiocbq->iocb;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+
+ if (expect_rsp) {
+ cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+ cmd->un.elsreq64.remoteID = did; /* DID */
+ cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
+ cmd->ulpTimeout = tmo;
+ } else {
+ cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
+ cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
+ cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
+ cmd->ulpPU = PARM_NPIV_DID;
+ }
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpClass = CLASS3;
+
+ /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ if (expect_rsp) {
+ cmd->un.elsreq64.myID = vport->fc_myDID;
+
+ /* For ELS_REQUEST64_CR, use the VPI by default */
+ cmd->ulpContext = phba->vpi_ids[vport->vpi];
+ }
+
+ cmd->ulpCt_h = 0;
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ cmd->ulpCt_l = 0; /* context = invalid RPI */
+ else
+ cmd->ulpCt_l = 1; /* context = VPI */
+ }
+}
+
+static void
+__lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport,
+ struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
+ u32 elscmd, u8 tmo, u8 expect_rsp)
+{
+ struct lpfc_hba *phba = vport->phba;
+ union lpfc_wqe128 *wqe;
+ struct ulp_bde64_le *bde;
+ u8 els_id;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Word 0 - 2 BDE */
+ bde = (struct ulp_bde64_le *)&wqe->generic.bde;
+ bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
+ bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
+ bde->type_size = cpu_to_le32(cmd_size);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ if (expect_rsp) {
+ bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
+
+ /* Transfer length */
+ wqe->els_req.payload_len = cmd_size;
+ wqe->els_req.max_response_payload_len = FCELSSIZE;
+
+ /* DID */
+ bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
+
+ /* Word 11 - ELS_ID */
+ switch (elscmd) {
+ case ELS_CMD_PLOGI:
+ els_id = LPFC_ELS_ID_PLOGI;
+ break;
+ case ELS_CMD_FLOGI:
+ els_id = LPFC_ELS_ID_FLOGI;
+ break;
+ case ELS_CMD_LOGO:
+ els_id = LPFC_ELS_ID_LOGO;
+ break;
+ case ELS_CMD_FDISC:
+ if (!vport->fc_myDID) {
+ els_id = LPFC_ELS_ID_FDISC;
+ break;
+ }
+ fallthrough;
+ default:
+ els_id = LPFC_ELS_ID_DEFAULT;
+ break;
+ }
+
+ bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+ } else {
+ /* DID */
+ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
+
+ /* Transfer length */
+ wqe->xmit_els_rsp.response_payload_len = cmd_size;
+
+ bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
+ CMD_XMIT_ELS_RSP64_WQE);
+ }
+
+ bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
+ bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+
+ /* If we have NPIV enabled, we want to send ELS traffic by VPI.
+ * For SLI4, since the driver controls VPIs, we also want to include
+ * all ELS pt2pt protocol traffic.
+ */
+ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
+ (vport->fc_flag & FC_PT2PT)) {
+ if (expect_rsp) {
+ bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
+
+ /* For ELS_REQUEST64_WQE, use the VPI by default */
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[vport->vpi]);
+ }
+
+ /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
+ if (elscmd == ELS_CMD_ECHO)
+ bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
+ else
+ bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
+ }
+}
+
+void
+lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+ u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
+ u8 expect_rsp)
+{
+ phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
+ elscmd, tmo, expect_rsp);
+}
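
/*
 * Illustration: the _s4 request path above maps a handful of ELS commands
 * to a dedicated WQE els_id, and FDISC only earns its own id while the
 * vport has no DID yet (fabric discovery before an address is assigned).
 * The same selection in isolation, with stand-in constants:
 */
#include <stdio.h>

enum { ELS_PLOGI = 0x03, ELS_FLOGI = 0x04, ELS_LOGO = 0x05, ELS_FDISC = 0x51 };
enum { ID_DEFAULT, ID_LOGO, ID_FDISC, ID_FLOGI, ID_PLOGI };	/* demo ids */

static int demo_els_id(unsigned int elscmd, unsigned int my_did)
{
	switch (elscmd) {
	case ELS_PLOGI:
		return ID_PLOGI;
	case ELS_FLOGI:
		return ID_FLOGI;
	case ELS_LOGO:
		return ID_LOGO;
	case ELS_FDISC:
		if (!my_did)			/* pre-login FDISC only */
			return ID_FDISC;
		/* fall through */
	default:
		return ID_DEFAULT;
	}
}

int main(void)
{
	printf("FDISC w/o DID -> %d, FDISC with DID -> %d\n",
	       demo_els_id(ELS_FDISC, 0), demo_els_id(ELS_FDISC, 0x010203));
	return 0;
}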
+
+static void
+__lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
+ u16 rpi, u32 num_entry, u8 tmo)
+{
+ IOCB_t *cmd;
+
+ cmd = &cmdiocbq->iocb;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
+
+ cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
+ cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
+ cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+
+ cmd->ulpContext = rpi;
+ cmd->ulpClass = CLASS3;
+ cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+ cmd->ulpBdeCount = 1;
+ cmd->ulpLe = 1;
+ cmd->ulpOwner = OWN_CHIP;
+ cmd->ulpTimeout = tmo;
+}
+
+static void
+__lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
+ u16 rpi, u32 num_entry, u8 tmo)
+{
+ union lpfc_wqe128 *cmdwqe;
+ struct ulp_bde64_le *bde, *bpl;
+ u32 xmit_len = 0, total_len = 0, size, type, i;
+
+ cmdwqe = &cmdiocbq->wqe;
+ memset(cmdwqe, 0, sizeof(*cmdwqe));
+
+ /* Calculate total_len and xmit_len */
+ bpl = (struct ulp_bde64_le *)bmp->virt;
+ for (i = 0; i < num_entry; i++) {
+ size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
+ total_len += size;
+ }
+ for (i = 0; i < num_entry; i++) {
+ size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
+ type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
+ if (type != ULP_BDE64_TYPE_BDE_64)
+ break;
+ xmit_len += size;
+ }
+
+ /* Words 0 - 2 */
+ bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
+ bde->addr_low = bpl->addr_low;
+ bde->addr_high = bpl->addr_high;
+ bde->type_size = cpu_to_le32(xmit_len);
+ bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
+
+ /* Word 3 */
+ cmdwqe->gen_req.request_payload_len = xmit_len;
+
+ /* Word 5 */
+ bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
+ bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+ bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
+ bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
+ bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
+ bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
+ bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
+
+ /* Word 12 */
+ cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
+}
+
+void
+lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
+{
+ phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
+}
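
/*
 * Illustration: the _s4 gen_req prep walks the buffer pointer list twice,
 * once to total every descriptor and once to stop at the first non-data
 * BDE while accumulating the transmit length; max_response_payload_len
 * then falls out as total minus transmit.  Standalone sketch (the masks
 * are stand-ins, not the ULP_BDE64_* constants; descriptors are taken as
 * host-endian where the driver does le32_to_cpu() per word):
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BDE_SIZE_MASK	0x00ffffffu	/* low 24 bits: length */
#define DEMO_BDE_TYPE_MASK	0xff000000u	/* high 8 bits: type */
#define DEMO_BDE_TYPE_DATA	0x00000000u	/* 64-bit data BDE */
#define DEMO_BDE_TYPE_RSP	0x40000000u	/* first response buffer */

int main(void)
{
	/* Two request buffers followed by one response buffer. */
	uint32_t bpl[] = { 0x100, 0x80, DEMO_BDE_TYPE_RSP | 0x400 };
	uint32_t xmit_len = 0, total_len = 0;
	unsigned int i;

	for (i = 0; i < 3; i++)
		total_len += bpl[i] & DEMO_BDE_SIZE_MASK;
	for (i = 0; i < 3; i++) {
		if ((bpl[i] & DEMO_BDE_TYPE_MASK) != DEMO_BDE_TYPE_DATA)
			break;		/* first non-data BDE ends the request */
		xmit_len += bpl[i] & DEMO_BDE_SIZE_MASK;
	}
	printf("xmit=%u max_response=%u\n", xmit_len, total_len - xmit_len);
	return 0;
}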
+
+static void
+__lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ IOCB_t *icmd;
+
+ icmd = &cmdiocbq->iocb;
+ memset(icmd, 0, sizeof(*icmd));
+
+ icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+ icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
+ icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+ icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
+ icmd->un.xseq64.w5.hcsw.Fctl = LA;
+ if (last_seq)
+ icmd->un.xseq64.w5.hcsw.Fctl |= LS;
+ icmd->un.xseq64.w5.hcsw.Dfctl = 0;
+ icmd->un.xseq64.w5.hcsw.Rctl = rctl;
+ icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
+
+ icmd->ulpBdeCount = 1;
+ icmd->ulpLe = 1;
+ icmd->ulpClass = CLASS3;
+
+ switch (cr_cx_cmd) {
+ case CMD_XMIT_SEQUENCE64_CR:
+ icmd->ulpContext = rpi;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+ break;
+ case CMD_XMIT_SEQUENCE64_CX:
+ icmd->ulpContext = ox_id;
+ icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+__lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ union lpfc_wqe128 *wqe;
+ struct ulp_bde64 *bpl;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Words 0 - 2 */
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
+ wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
+ wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
+
+ /* Word 5 */
+ bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
+ bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
+ bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
+ bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
+ bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
+
+ bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
+ CMD_XMIT_SEQUENCE64_WQE);
+
+ /* Word 7 */
+ bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
+
+ /* Word 9 */
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
+
+ /* Word 12 */
+ if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
+ wqe->xmit_sequence.xmit_len = full_size;
+ else
+ wqe->xmit_sequence.xmit_len =
+ wqe->xmit_sequence.bde.tus.f.bdeSize;
+}
+
+void
+lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
+ u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
+{
+ phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
+ rctl, last_seq, cr_cx_cmd);
+}
+
+static void
+__lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
+{
+ IOCB_t *icmd = NULL;
+
+ icmd = &cmdiocbq->iocb;
+ memset(icmd, 0, sizeof(*icmd));
+
+ /* Word 5 */
+ icmd->un.acxri.abortContextTag = ulp_context;
+ icmd->un.acxri.abortIoTag = iotag;
+
+ if (ia) {
+ /* Word 7 */
+ icmd->ulpCommand = CMD_CLOSE_XRI_CN;
+ } else {
+ /* Word 3 */
+ icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
+
+ /* Word 7 */
+ icmd->ulpClass = ulp_class;
+ icmd->ulpCommand = CMD_ABORT_XRI_CN;
+ }
+
+ /* Word 7 */
+ icmd->ulpLe = 1;
+}
+
+static void
+__lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
+ u16 iotag, u8 ulp_class, u16 cqid, bool ia,
+ bool wqec)
+{
+ union lpfc_wqe128 *wqe;
+
+ wqe = &cmdiocbq->wqe;
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Word 3 */
+ bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+ if (ia)
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+ else
+ bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
+
+ /* Word 8 */
+ wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
+
+ /* Word 10 */
+ bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+
+ /* Word 11 */
+ if (wqec)
+ bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
+ bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+}
+
+void
+lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
+ u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
+ bool ia, bool wqec)
+{
+ phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
+ cqid, ia, wqec);
+}
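
/*
 * Illustration: both abort prep variants reduce the old inline command
 * selection to one boolean: "ia" is true when the link cannot carry an
 * ABTS (link down or external loopback), in which case the exchange is
 * closed locally instead of aborted on the wire.  The decision in
 * isolation, with stand-in state flags:
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_link {
	bool link_up;
	bool external_loopback;
};

static const char *demo_abort_kind(const struct demo_link *l)
{
	bool ia = !l->link_up || l->external_loopback;

	return ia ? "CLOSE_XRI (local cleanup)" : "ABORT_XRI (ABTS on wire)";
}

int main(void)
{
	struct demo_link up = { .link_up = true };
	struct demo_link lb = { .link_up = true, .external_loopback = true };

	printf("%s\n%s\n", demo_abort_kind(&up), demo_abort_kind(&lb));
	return 0;
}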
+
/**
* lpfc_sli_api_table_setup - Set up sli api function jump table
* @phba: The hba struct for which this call is being executed.
@@ -11170,11 +11195,19 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
+ phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
+ phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
+ phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
+ phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
break;
case LPFC_PCI_DEV_OC:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
+ phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
+ phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
+ phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
+ phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11182,7 +11215,6 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
dev_grp);
return -ENODEV;
}
- phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
return 0;
}
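
/*
 * Illustration: lpfc_sli_api_table_setup() is a plain function-pointer
 * jump table; each entry point is bound once at init time to its _s3 or
 * _s4 variant and the wrappers above just indirect through phba.  The
 * same dispatch shape, standalone and with invented names:
 */
#include <stdio.h>

struct demo_hba;
typedef void (*demo_prep_fn)(struct demo_hba *hba);

struct demo_hba {
	int dev_grp;
	demo_prep_fn prep_gen_req;	/* bound once, called many times */
};

static void demo_prep_s3(struct demo_hba *hba) { (void)hba; puts("SLI-3: build IOCB"); }
static void demo_prep_s4(struct demo_hba *hba) { (void)hba; puts("SLI-4: build WQE"); }

static int demo_api_table_setup(struct demo_hba *hba)
{
	switch (hba->dev_grp) {
	case 3:
		hba->prep_gen_req = demo_prep_s3;
		break;
	case 4:
		hba->prep_gen_req = demo_prep_s4;
		break;
	default:
		return -1;		/* unknown device group */
	}
	return 0;
}

int main(void)
{
	struct demo_hba hba = { .dev_grp = 4 };

	if (demo_api_table_setup(&hba))
		return 1;
	hba.prep_gen_req(&hba);		/* callers never re-test sli_rev */
	return 0;
}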
@@ -11201,15 +11233,15 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
struct lpfc_io_buf *lpfc_cmd;
- if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
if (unlikely(!phba->sli4_hba.hdwq))
return NULL;
/*
* for abort iocb hba_wqidx should already
* be setup based on what work queue we used.
*/
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
+ if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
+ lpfc_cmd = piocb->io_buf;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
@@ -11243,7 +11275,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
unsigned long iflags;
int rc;
+ /* If the PCI channel is in offline state, do not post iocbs. */
+ if (unlikely(pci_channel_offline(phba->pcidev)))
+ return IOCB_ERROR;
+
if (phba->sli_rev == LPFC_SLI_REV4) {
+ lpfc_sli_prep_wqe(phba, piocb);
+
eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
pring = lpfc_sli4_calc_ring(phba, piocb);
@@ -11380,8 +11418,8 @@ lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"3095 Event Context not found, no "
"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
- iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
- vpi, rpi);
+ vpi, rpi, iocbq->iocb.ulpStatus,
+ iocbq->iocb.ulpContext);
}
/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
@@ -12229,47 +12267,33 @@ static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- IOCB_t *irsp = &rspiocb->iocb;
- uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb = NULL;
-
- if (irsp->ulpStatus) {
+ u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
+ u32 ulp_word4 = get_job_word4(phba, rspiocb);
+ u8 cmnd = get_job_cmnd(phba, cmdiocb);
+ if (ulp_status) {
/*
* Assume that the port already completed and returned, or
* will return the iocb. Just Log the message.
*/
- abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
- abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
-
- spin_lock_irq(&phba->hbalock);
if (phba->sli_rev < LPFC_SLI_REV4) {
- if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
- irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
- irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
- spin_unlock_irq(&phba->hbalock);
+ if (cmnd == CMD_ABORT_XRI_CX &&
+ ulp_status == IOSTAT_LOCAL_REJECT &&
+ ulp_word4 == IOERR_ABORT_REQUESTED) {
goto release_iocb;
}
- if (abort_iotag != 0 &&
- abort_iotag <= phba->sli.last_iotag)
- abort_iocb =
- phba->sli.iocbq_lookup[abort_iotag];
- } else
- /* For sli4 the abort_tag is the XRI,
- * so the abort routine puts the iotag of the iocb
- * being aborted in the context field of the abort
- * IOCB.
- */
- abort_iocb = phba->sli.iocbq_lookup[abort_context];
+ }
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb x%px "
- "with tag %x context %x, abort status %x, "
- "abort code %x\n",
- abort_iocb, abort_iotag, abort_context,
- irsp->ulpStatus, irsp->un.ulpWord[4]);
+ "with io cmd xri %x abort tag: x%x, "
+ "abort status %x abort code %x\n",
+ cmdiocb, get_job_abtsiotag(phba, cmdiocb),
+ (phba->sli_rev == LPFC_SLI_REV4) ?
+ get_wqe_reqtag(cmdiocb) :
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ ulp_status, ulp_word4);
- spin_unlock_irq(&phba->hbalock);
}
release_iocb:
lpfc_sli_release_iocbq(phba, cmdiocb);
@@ -12291,26 +12315,46 @@ void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_nodelist *ndlp = NULL;
- IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
+ IOCB_t *irsp;
+ LPFC_MBOXQ_t *mbox;
+ u32 ulp_command, ulp_status, ulp_word4, iotag;
+
+ ulp_command = get_job_cmnd(phba, cmdiocb);
+ ulp_status = get_job_ulpstatus(phba, rspiocb);
+ ulp_word4 = get_job_word4(phba, rspiocb);
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = get_wqe_reqtag(cmdiocb);
+ } else {
+ irsp = &rspiocb->iocb;
+ iotag = irsp->ulpIoTag;
+
+ /* It is possible for a PLOGI_RJT for NPIV ports to get aborted.
+ * The MBX_REG_LOGIN64 mbox command is freed back to the
+ * mbox_mem_pool here.
+ */
+ if (cmdiocb->context_un.mbox) {
+ mbox = cmdiocb->context_un.mbox;
+ lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ cmdiocb->context_un.mbox = NULL;
+ }
+ }
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0139 Ignoring ELS cmd code x%x completion Data: "
- "x%x x%x x%x\n",
- irsp->ulpIoTag, irsp->ulpStatus,
- irsp->un.ulpWord[4], irsp->ulpTimeout);
+ "x%x x%x x%x x%px\n",
+ ulp_command, ulp_status, ulp_word4, iotag,
+ cmdiocb->ndlp);
/*
* Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
* if exchange is busy.
*/
- if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
- ndlp = cmdiocb->context_un.ndlp;
+ if (ulp_command == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
- } else {
- ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+ else
lpfc_els_free_iocb(phba, cmdiocb);
- }
lpfc_nlp_put(ndlp);
}
@@ -12336,28 +12380,29 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
- IOCB_t *icmd = NULL;
- IOCB_t *iabt = NULL;
int retval = IOCB_ERROR;
unsigned long iflags;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
+ u32 ulp_command = get_job_cmnd(phba, cmdiocb);
+ u16 ulp_context, iotag;
+ bool ia;
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
* being aborted.
*/
- icmd = &cmdiocb->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
- cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
+ if (ulp_command == CMD_ABORT_XRI_WQE ||
+ ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
return IOCB_ABORTING;
if (!pring) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
return retval;
}
@@ -12367,10 +12412,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
*/
if ((vport->load_flag & FC_UNLOADING) &&
pring->ringno == LPFC_ELS_RING) {
- if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
else
- cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
return retval;
}
@@ -12382,43 +12427,47 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* This signals the response to set the correct status
* before calling the completion handler
*/
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
- iabt = &abtsiocbp->iocb;
- iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
- iabt->un.acxri.abortContextTag = icmd->ulpContext;
if (phba->sli_rev == LPFC_SLI_REV4) {
- iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
- if (pring->ringno == LPFC_ELS_RING)
- iabt->un.acxri.abortContextTag = cmdiocb->iotag;
+ ulp_context = cmdiocb->sli4_xritag;
+ iotag = abtsiocbp->iotag;
} else {
- iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+ iotag = cmdiocb->iocb.ulpIoTag;
if (pring->ringno == LPFC_ELS_RING) {
- ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
- iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
+ ndlp = cmdiocb->ndlp;
+ ulp_context = ndlp->nlp_rpi;
+ } else {
+ ulp_context = cmdiocb->iocb.ulpContext;
}
}
- iabt->ulpLe = 1;
- iabt->ulpClass = icmd->ulpClass;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
- abtsiocbp->iocb_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
- if (cmdiocb->iocb_flag & LPFC_IO_FOF)
- abtsiocbp->iocb_flag |= LPFC_IO_FOF;
if (phba->link_state < LPFC_LINK_UP ||
(phba->sli_rev == LPFC_SLI_REV4 &&
- phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN))
- iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+ phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
+ (phba->link_flag & LS_EXTERNAL_LOOPBACK))
+ ia = true;
else
- iabt->ulpCommand = CMD_ABORT_XRI_CN;
+ ia = false;
+
+ lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
+ cmdiocb->iocb.ulpClass,
+ LPFC_WQE_CQ_ID_DEFAULT, ia, false);
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
+ if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+ abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
+
+ if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+ abtsiocbp->cmd_flag |= LPFC_IO_FOF;
if (cmpl)
- abtsiocbp->iocb_cmpl = cmpl;
+ abtsiocbp->cmd_cmpl = cmpl;
else
- abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+ abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
abtsiocbp->vport = vport;
if (phba->sli_rev == LPFC_SLI_REV4) {
@@ -12438,14 +12487,14 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abort_iotag_exit:
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
- "0339 Abort xri x%x, original iotag x%x, "
- "abort cmd iotag x%x retval x%x\n",
- iabt->un.acxri.abortIoTag,
- iabt->un.acxri.abortContextTag,
- abtsiocbp->iotag, retval);
-
+ "0339 Abort IO XRI x%x, Original iotag x%x, "
+ "abort tag x%x Cmdjob: x%px Abortjob: x%px "
+ "retval x%x\n",
+ ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
+ cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
+ retval);
if (retval) {
- cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
__lpfc_sli_release_iocbq(phba, abtsiocbp);
}
@@ -12503,7 +12552,7 @@ static int
lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
struct lpfc_vport *vport)
{
- IOCB_t *icmd = NULL;
+ u8 ulp_command;
/* No null ptr vports */
if (!iocbq || iocbq->vport != vport)
@@ -12512,12 +12561,13 @@ lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
/* iocb must be for FCP IO, already exists on the TX cmpl queue,
* can't be premarked as driver aborted, nor be an ABORT iocb itself
*/
- icmd = &iocbq->iocb;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
- (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
- (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN))
+ ulp_command = get_job_cmnd(vport->phba, iocbq);
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
+ (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
+ (ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_WQE))
return -EINVAL;
return 0;
@@ -12609,9 +12659,9 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
- IOCB_t *icmd = NULL;
int sum, i;
unsigned long iflags;
+ u8 ulp_command;
spin_lock_irqsave(&phba->hbalock, iflags);
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
@@ -12619,14 +12669,15 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
if (!iocbq || iocbq->vport != vport)
continue;
- if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
+ if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
continue;
/* Include counting outstanding aborts */
- icmd = &iocbq->iocb;
- if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
- icmd->ulpCommand == CMD_CLOSE_XRI_CN) {
+ ulp_command = get_job_cmnd(phba, iocbq);
+ if (ulp_command == CMD_ABORT_XRI_CN ||
+ ulp_command == CMD_CLOSE_XRI_CN ||
+ ulp_command == CMD_ABORT_XRI_WQE) {
sum++;
continue;
}
@@ -12641,33 +12692,6 @@ lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
}
/**
- * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
- * @phba: Pointer to HBA context object
- * @cmdiocb: Pointer to command iocb object.
- * @wcqe: pointer to the complete wcqe
- *
- * This function is called when an aborted FCP iocb completes. This
- * function is called by the ring event handler with no lock held.
- * This function frees the iocb. It is called for sli-4 adapters.
- **/
-void
-lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
- struct lpfc_wcqe_complete *wcqe)
-{
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3017 ABORT_XRI_CN completing on rpi x%x "
- "original iotag x%x, abort cmd iotag x%x "
- "status 0x%x, reason 0x%x\n",
- cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag,
- (bf_get(lpfc_wcqe_c_status, wcqe)
- & LPFC_IOCB_STATUS_MASK),
- wcqe->parameter);
- lpfc_sli_release_iocbq(phba, cmdiocb);
-}
-
-/**
* lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
@@ -12682,13 +12706,15 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3096 ABORT_XRI_CN completing on rpi x%x "
+ "3096 ABORT_XRI_CX completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"status 0x%x, reason 0x%x\n",
+ (phba->sli_rev == LPFC_SLI_REV4) ?
+ cmdiocb->sli4_xritag :
cmdiocb->iocb.un.acxri.abortContextTag,
- cmdiocb->iocb.un.acxri.abortIoTag,
- cmdiocb->iotag, rspiocb->iocb.ulpStatus,
- rspiocb->iocb.un.ulpWord[4]);
+ get_job_abtsiotag(phba, cmdiocb),
+ cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
+ get_job_word4(phba, rspiocb));
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -12729,7 +12755,6 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
int errcnt = 0, ret_val = 0;
unsigned long iflags;
int i;
- void *fcp_cmpl = NULL;
/* all I/Os are in process of being flushed */
if (phba->hba_flag & HBA_IOQ_FLUSH)
@@ -12748,13 +12773,11 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
spin_lock_irqsave(&phba->hbalock, iflags);
if (phba->sli_rev == LPFC_SLI_REV3) {
pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
- fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
} else if (phba->sli_rev == LPFC_SLI_REV4) {
pring = lpfc_sli4_calc_ring(phba, iocbq);
- fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
}
ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
- fcp_cmpl);
+ lpfc_sli_abort_fcp_cmpl);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (ret_val != IOCB_SUCCESS)
errcnt++;
@@ -12796,12 +12819,13 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
struct lpfc_hba *phba = vport->phba;
struct lpfc_io_buf *lpfc_cmd;
struct lpfc_iocbq *abtsiocbq;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
struct lpfc_iocbq *iocbq;
- IOCB_t *icmd;
int sum, i, ret_val;
unsigned long iflags;
struct lpfc_sli_ring *pring_s4 = NULL;
+ u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
+ bool ia;
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -12846,8 +12870,8 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
* If the iocbq is already being aborted, don't take a second
* action, but do count it.
*/
- if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
- !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
+ !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
if (phba->sli_rev == LPFC_SLI_REV4)
spin_unlock(&pring_s4->ring_lock);
spin_unlock(&lpfc_cmd->buf_lock);
@@ -12863,41 +12887,50 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
continue;
}
- icmd = &iocbq->iocb;
- abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
- abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
- if (phba->sli_rev == LPFC_SLI_REV4)
- abtsiocbq->iocb.un.acxri.abortIoTag =
- iocbq->sli4_xritag;
- else
- abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
- abtsiocbq->iocb.ulpLe = 1;
- abtsiocbq->iocb.ulpClass = icmd->ulpClass;
- abtsiocbq->vport = vport;
-
- /* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
- if (iocbq->iocb_flag & LPFC_IO_FCP)
- abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (iocbq->iocb_flag & LPFC_IO_FOF)
- abtsiocbq->iocb_flag |= LPFC_IO_FOF;
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ iotag = abtsiocbq->iotag;
+ ulp_context = iocbq->sli4_xritag;
+ cqid = lpfc_cmd->hdwq->io_cq_map;
+ } else {
+ iotag = iocbq->iocb.ulpIoTag;
+ if (pring->ringno == LPFC_ELS_RING) {
+ ndlp = iocbq->ndlp;
+ ulp_context = ndlp->nlp_rpi;
+ } else {
+ ulp_context = iocbq->iocb.ulpContext;
+ }
+ }
ndlp = lpfc_cmd->rdata->pnode;
if (lpfc_is_link_up(phba) &&
- (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
- abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
+ (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
+ !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
+ ia = false;
else
- abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
+ ia = true;
+
+ lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
+ iocbq->iocb.ulpClass, cqid,
+ ia, false);
+
+ abtsiocbq->vport = vport;
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
+ if (iocbq->cmd_flag & LPFC_IO_FCP)
+ abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
+ if (iocbq->cmd_flag & LPFC_IO_FOF)
+ abtsiocbq->cmd_flag |= LPFC_IO_FOF;
/* Setup callback routine and issue the command. */
- abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
+ abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
/*
* Indicate the IO is being aborted by the driver and set
* the caller's flag into the aborted IO.
*/
- iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+ iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
if (phba->sli_rev == LPFC_SLI_REV4) {
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
@@ -12944,9 +12977,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
wait_queue_head_t *pdone_q;
unsigned long iflags;
struct lpfc_io_buf *lpfc_cmd;
+ size_t offset = offsetof(struct lpfc_iocbq, wqe);
spin_lock_irqsave(&phba->hbalock, iflags);
- if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+ if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
/*
* A time out has occurred for the iocb. If a time out
@@ -12955,26 +12989,27 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
*/
spin_unlock_irqrestore(&phba->hbalock, iflags);
- cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
- cmdiocbq->wait_iocb_cmpl = NULL;
- if (cmdiocbq->iocb_cmpl)
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
+ cmdiocbq->wait_cmd_cmpl = NULL;
+ if (cmdiocbq->cmd_cmpl)
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
else
lpfc_sli_release_iocbq(phba, cmdiocbq);
return;
}
- cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
- if (cmdiocbq->context2 && rspiocbq)
- memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
- &rspiocbq->iocb, sizeof(IOCB_t));
+ /* Copy the contents of the local rspiocb into the caller's buffer. */
+ cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
+ if (cmdiocbq->rsp_iocb && rspiocbq)
+ memcpy((char *)cmdiocbq->rsp_iocb + offset,
+ (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
/* Set the exchange busy flag for task management commands */
- if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
- !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+ !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
- cur_iocbq);
- if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
+ cur_iocbq);
+ if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
else
lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
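
/*
 * Illustration: the wake path above copies only the tail of the iocbq,
 * from offsetof(struct lpfc_iocbq, wqe) onward, so the caller's
 * bookkeeping fields ahead of that offset survive.  The same trick on a
 * demo struct (not the real lpfc_iocbq layout):
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct demo_iocbq {
	int iotag;		/* caller-owned, must survive the copy */
	int wqe;		/* response-relevant state starts here */
	int wcqe_status;
};

int main(void)
{
	struct demo_iocbq rsp = { .iotag = 99, .wqe = 7, .wcqe_status = 3 };
	struct demo_iocbq cmd = { .iotag = 42 };
	size_t offset = offsetof(struct demo_iocbq, wqe);

	memcpy((char *)&cmd + offset, (const char *)&rsp + offset,
	       sizeof(cmd) - offset);
	printf("iotag=%d wqe=%d status=%d\n",	/* iotag stays 42 */
	       cmd.iotag, cmd.wqe, cmd.wcqe_status);
	return 0;
}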
@@ -12993,7 +13028,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
* @piocbq: Pointer to command iocb.
* @flag: Flag to test.
*
- * This routine grabs the hbalock and then test the iocb_flag to
+ * This routine grabs the hbalock and then tests the cmd_flag to
* see if the passed in flag is set.
* Returns:
* 1 if flag is set.
@@ -13007,7 +13042,7 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
int ret;
spin_lock_irqsave(&phba->hbalock, iflags);
- ret = piocbq->iocb_flag & flag;
+ ret = piocbq->cmd_flag & flag;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
@@ -13022,14 +13057,14 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
- * iocb to complete. The iocb_cmpl field of the shall be used
+ * iocb to complete. The cmd_cmpl field of the iocb shall be used
* to handle iocbs which time out. If the field is NULL, the
* function shall free the iocbq structure. If more clean up is
* needed, the caller is expected to provide a completion function
* that will provide the needed clean up. If the iocb command is
* not completed within timeout seconds, the function will either
- * free the iocbq structure (if iocb_cmpl == NULL) or execute the
- * completion function set in the iocb_cmpl field and then return
+ * free the iocbq structure (if cmd_cmpl == NULL) or execute the
+ * completion function set in the cmd_cmpl field and then return
* a status of IOCB_TIMEDOUT. The caller should not free the iocb
* resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using an
@@ -13041,7 +13076,7 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* This function assumes that the iocb completions occur while
* this function sleeps. So, this function cannot be called from
* the thread which processes iocb completion for this ring.
- * This function clears the iocb_flag of the iocb object before
+ * This function clears the cmd_flag of the iocb object before
* issuing the iocb and the iocb completion handler sets this
* flag and wakes this thread when the iocb completes.
* The contents of the response iocb will be copied to prspiocbq
@@ -13067,24 +13102,26 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
unsigned long iflags;
bool iocb_completed = true;
- if (phba->sli_rev >= LPFC_SLI_REV4)
+ if (phba->sli_rev >= LPFC_SLI_REV4) {
+ lpfc_sli_prep_wqe(phba, piocb);
+
pring = lpfc_sli4_calc_ring(phba, piocb);
- else
+ } else
pring = &phba->sli.sli3_ring[ring_number];
/*
- * If the caller has provided a response iocbq buffer, then context2
+ * If the caller has provided a response iocbq buffer, then rsp_iocb
* is NULL or it's an error.
*/
if (prspiocbq) {
- if (piocb->context2)
+ if (piocb->rsp_iocb)
return IOCB_ERROR;
- piocb->context2 = prspiocbq;
+ piocb->rsp_iocb = prspiocbq;
}
- piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
- piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
+ piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
+ piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
- piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
+ piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -13102,7 +13139,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
spin_lock_irqsave(&phba->hbalock, iflags);
- if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
+ if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
/*
* IOCB timed out. Inform the wake iocb wait
@@ -13110,7 +13147,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
*/
iocb_completed = false;
- piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (iocb_completed) {
@@ -13162,10 +13199,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
}
if (prspiocbq)
- piocb->context2 = NULL;
+ piocb->rsp_iocb = NULL;
piocb->context_un.wait_queue = NULL;
- piocb->iocb_cmpl = NULL;
+ piocb->cmd_cmpl = NULL;
return retval;
}
@@ -13371,6 +13408,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
uint32_t uerr_sta_hi, uerr_sta_lo;
uint32_t if_type, portsmphr;
struct lpfc_register portstat_reg;
+ u32 logmask;
/*
* For now, use the SLI4 device internal unrecoverable error
@@ -13421,7 +13459,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ logmask = LOG_TRACE_EVENT;
+ if (phba->work_status[0] ==
+ SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+ logmask = LOG_SLI;
+ lpfc_printf_log(phba, KERN_ERR, logmask,
"2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
@@ -14100,135 +14143,19 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
- * @phba: pointer to lpfc hba data structure
- * @pIocbIn: pointer to the rspiocbq
- * @pIocbOut: pointer to the cmdiocbq
- * @wcqe: pointer to the complete wcqe
- *
- * This routine transfers the fields of a command iocbq to a response iocbq
- * by copying all the IOCB fields from command iocbq and transferring the
- * completion status information from the complete wcqe.
- **/
-static void
-lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
- struct lpfc_iocbq *pIocbIn,
- struct lpfc_iocbq *pIocbOut,
- struct lpfc_wcqe_complete *wcqe)
-{
- int numBdes, i;
- unsigned long iflags;
- uint32_t status, max_response;
- struct lpfc_dmabuf *dmabuf;
- struct ulp_bde64 *bpl, bde;
- size_t offset = offsetof(struct lpfc_iocbq, iocb);
-
- memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
- sizeof(struct lpfc_iocbq) - offset);
- /* Map WCQE parameters into irspiocb parameters */
- status = bf_get(lpfc_wcqe_c_status, wcqe);
- pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
- if (pIocbOut->iocb_flag & LPFC_IO_FCP)
- if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
- pIocbIn->iocb.un.fcpi.fcpi_parm =
- pIocbOut->iocb.un.fcpi.fcpi_parm -
- wcqe->total_data_placed;
- else
- pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- else {
- pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
- switch (pIocbOut->iocb.ulpCommand) {
- case CMD_ELS_REQUEST64_CR:
- dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- bde.tus.w = le32_to_cpu(bpl[1].tus.w);
- max_response = bde.tus.f.bdeSize;
- break;
- case CMD_GEN_REQUEST64_CR:
- max_response = 0;
- if (!pIocbOut->context3)
- break;
- numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
- sizeof(struct ulp_bde64);
- dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
- bpl = (struct ulp_bde64 *)dmabuf->virt;
- for (i = 0; i < numBdes; i++) {
- bde.tus.w = le32_to_cpu(bpl[i].tus.w);
- if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
- max_response += bde.tus.f.bdeSize;
- }
- break;
- default:
- max_response = wcqe->total_data_placed;
- break;
- }
- if (max_response < wcqe->total_data_placed)
- pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
- else
- pIocbIn->iocb.un.genreq64.bdl.bdeSize =
- wcqe->total_data_placed;
- }
-
- /* Convert BG errors for completion status */
- if (status == CQE_STATUS_DI_ERROR) {
- pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
-
- if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
- pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
- else
- pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
-
- pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
- if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_GUARD_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_APPTAG_ERR_MASK;
- if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_REFTAG_ERR_MASK;
-
- /* Check to see if there was any good data before the error */
- if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- BGS_HI_WATER_MARK_PRESENT_MASK;
- pIocbIn->iocb.unsli3.sli3_bg.bghm =
- wcqe->total_data_placed;
- }
-
- /*
- * Set ALL the error bits to indicate we don't know what
- * type of error it is.
- */
- if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
- pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
- (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
- BGS_GUARD_ERR_MASK);
- }
-
- /* Pick up HBA exchange busy condition */
- if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
-}
-
-/**
- * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
+ * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
* @phba: Pointer to HBA context object.
* @irspiocbq: Pointer to work-queue completion queue entry.
*
* This routine handles an ELS work-queue completion event and construct
- * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
* discovery engine to handle.
*
* Return: Pointer to the receive IOCBQ, NULL otherwise.
**/
static struct lpfc_iocbq *
-lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
- struct lpfc_iocbq *irspiocbq)
+lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *irspiocbq)
{
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
@@ -14240,11 +14167,13 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return NULL;
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
+ spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
if (unlikely(!cmdiocbq)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
@@ -14254,13 +14183,18 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return NULL;
}
- spin_lock_irqsave(&pring->ring_lock, iflags);
+ memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
+ memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
+
/* Put the iocb back on the txcmplq */
lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- /* Fake the irspiocbq and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
return irspiocbq;
}
@@ -15061,7 +14995,6 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
{
struct lpfc_sli_ring *pring = cq->pring;
struct lpfc_iocbq *cmdiocbq;
- struct lpfc_iocbq irspiocbq;
unsigned long iflags;
/* Check for response status */
@@ -15087,9 +15020,9 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Look up the FCP command IOCB and create pseudo response IOCB */
spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0374 FCP complete with no corresponding "
@@ -15100,39 +15033,31 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
- if (cmdiocbq->iocb_cmpl == NULL) {
- if (cmdiocbq->wqe_cmpl) {
- /* For FCP the flag is cleared in wqe_cmpl */
- if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
- cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- }
+ if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
- /* Pass the cmd_iocb and the wcqe to the upper layer */
- (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
- return;
+ if (cmdiocbq->cmd_cmpl) {
+ /* For FCP the flag is cleared in cmd_cmpl */
+ if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
+ cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
}
+
+ /* Pass the cmd_iocb and the wcqe to the upper layer */
+ memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
+ sizeof(struct lpfc_wcqe_complete));
+ cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
+ } else {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0375 FCP cmdiocb not callback function "
"iotag: (%d)\n",
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- return;
- }
-
- /* Only SLI4 non-IO commands stil use IOCB */
- /* Fake the irspiocb and copy necessary response information */
- lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
-
- if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
- spin_lock_irqsave(&phba->hbalock, iflags);
- cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
}
-
- /* Pass the cmd_iocb and the rsp state to the upper layer */
- (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
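
/*
 * Illustration: with the IOCB emulation gone, the FCP completion path
 * copies the WCQE into the command's own wcqe_cmpl member and invokes a
 * single cmd_cmpl callback with the command on both sides.  The same
 * shape on invented types:
 */
#include <stdio.h>
#include <string.h>

struct demo_wcqe {
	unsigned int status;
	unsigned int total_data_placed;
};

struct demo_iocbq {
	struct demo_wcqe wcqe_cmpl;	/* response state embedded in the cmd */
	void (*cmd_cmpl)(struct demo_iocbq *cmd, struct demo_iocbq *rsp);
};

static void demo_fcp_done(struct demo_iocbq *cmd, struct demo_iocbq *rsp)
{
	printf("self=%d status=%u placed=%u\n", cmd == rsp,
	       rsp->wcqe_cmpl.status, rsp->wcqe_cmpl.total_data_placed);
}

int main(void)
{
	struct demo_wcqe hw_cqe = { .status = 0, .total_data_placed = 512 };
	struct demo_iocbq cmd = { .cmd_cmpl = demo_fcp_done };

	memcpy(&cmd.wcqe_cmpl, &hw_cqe, sizeof(hw_cqe));
	cmd.cmd_cmpl(&cmd, &cmd);	/* same object as request and response */
	return 0;
}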
/**
@@ -16068,7 +15993,6 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
@@ -17990,8 +17914,8 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
* the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
- phba->sli4_hba.max_cfg_param.max_xri, 0);
+ xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri);
if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
spin_unlock_irq(&phba->hbalock);
return NO_XRI;
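
/*
 * Illustration: find_next_zero_bit(bmask, max, 0) collapses to
 * find_first_zero_bit(bmask, max); the two are equivalent when the scan
 * starts at bit 0.  The semantics, re-implemented portably (the kernel
 * versions operate word-at-a-time on unsigned longs):
 */
#include <limits.h>
#include <stdio.h>

#define DEMO_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* Return the index of the first clear bit, or 'size' if none is clear. */
static unsigned long demo_find_first_zero_bit(const unsigned long *addr,
					      unsigned long size)
{
	unsigned long i;

	for (i = 0; i < size; i++)
		if (!(addr[i / DEMO_BITS_PER_LONG] &
		      (1UL << (i % DEMO_BITS_PER_LONG))))
			return i;
	return size;	/* exhausted: the caller maps this to NO_XRI */
}

int main(void)
{
	unsigned long xri_bmask[1] = { 0x2fUL };	/* bits 0-3 and 5 in use */

	printf("first free xri = %lu\n",
	       demo_find_first_zero_bit(xri_bmask, DEMO_BITS_PER_LONG));
	return 0;
}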
@@ -18443,7 +18367,6 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
case FC_RCTL_ELS_REP: /* extended link services reply */
case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
- case FC_RCTL_BA_NOP: /* basic link service NOP */
case FC_RCTL_BA_ABTS: /* basic link service abort */
case FC_RCTL_BA_RMC: /* remove connection */
case FC_RCTL_BA_ACC: /* basic accept */
@@ -18464,6 +18387,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
return lpfc_fc_frame_check(phba, fc_hdr);
+ case FC_RCTL_BA_NOP: /* basic link service NOP */
default:
goto drop;
}
@@ -18848,11 +18772,8 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
- struct lpfc_nodelist *ndlp;
-
if (cmd_iocbq) {
- ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
- lpfc_nlp_put(ndlp);
+ lpfc_nlp_put(cmd_iocbq->ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
}
@@ -18860,8 +18781,8 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
- rsp_iocbq->iocb.ulpStatus,
- rsp_iocbq->iocb.un.ulpWord[4]);
+ get_job_ulpstatus(phba, rsp_iocbq),
+ get_job_word4(phba, rsp_iocbq));
}
/**
@@ -18903,7 +18824,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp;
uint16_t oxid, rxid, xri, lxri;
uint32_t sid, fctl;
- IOCB_t *icmd;
+ union lpfc_wqe128 *icmd;
int rc;
if (!lpfc_is_link_up(phba))
@@ -18931,32 +18852,22 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
if (!ctiocb)
return;
+ icmd = &ctiocb->wqe;
+
/* Extract the F_CTL field from FC_HDR */
fctl = sli4_fctl_from_fc_hdr(fc_hdr);
- icmd = &ctiocb->iocb;
- icmd->un.xseq64.bdl.bdeSize = 0;
- icmd->un.xseq64.bdl.ulpIoTag32 = 0;
- icmd->un.xseq64.w5.hcsw.Dfctl = 0;
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
- icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
-
- /* Fill in the rest of iocb fields */
- icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
- icmd->ulpBdeCount = 0;
- icmd->ulpLe = 1;
- icmd->ulpClass = CLASS3;
- icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- ctiocb->context1 = lpfc_nlp_get(ndlp);
- if (!ctiocb->context1) {
+ ctiocb->ndlp = lpfc_nlp_get(ndlp);
+ if (!ctiocb->ndlp) {
lpfc_sli_release_iocbq(phba, ctiocb);
return;
}
ctiocb->vport = phba->pport;
- ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
+ ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
ctiocb->sli4_lxritag = NO_XRI;
ctiocb->sli4_xritag = NO_XRI;
+ ctiocb->abort_rctl = FC_RCTL_BA_ACC;
if (fctl & FC_FC_EX_CTX)
/* Exchange responder sent the abort so we
@@ -18976,10 +18887,12 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
*/
if ((fctl & FC_FC_EX_CTX) &&
(lxri > lpfc_sli4_get_iocb_cnt(phba))) {
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
- bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
- bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
- bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ ctiocb->abort_rctl = FC_RCTL_BA_RJT;
+ bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
+ bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_INV_XID);
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_UNABLE);
}
/* If BA_ABTS failed to abort a partially assembled receive sequence,
@@ -18987,10 +18900,12 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* the IOCB for a BA_RJT.
*/
if (aborted == false) {
- icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
- bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
- bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
- bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ ctiocb->abort_rctl = FC_RCTL_BA_RJT;
+ bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
+ bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_INV_XID);
+ bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
+ FC_BA_RJT_UNABLE);
}
if (fctl & FC_FC_EX_CTX) {
@@ -18998,31 +18913,41 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* of BA_ACC will use OX_ID from ABTS for the XRI_TAG
* field and RX_ID from ABTS for RX_ID field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
+ ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
+ bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
} else {
/* ABTS sent by initiator to CT exchange, construction
* of BA_ACC will need to allocate a new XRI as for the
* XRI_TAG field.
*/
- bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
+ ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
}
- bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
- bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
+
+ /* OX_ID is invariable to who sent ABTS to CT exchange */
+ bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
+ bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
+
+ /* Use CT=VPI */
+ bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
+ ndlp->nlp_DID);
+ bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
/* Xmit CT abts response on exchange <xid> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+ ctiocb->abort_rctl, oxid, phba->link_state);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_ERROR) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"2925 Failed to issue CT ABTS RSP x%x on "
"xri x%x, Data x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ ctiocb->abort_rctl, oxid,
phba->link_state);
lpfc_nlp_put(ndlp);
- ctiocb->context1 = NULL;
+ ctiocb->ndlp = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
}
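The hunk above moves the BA_ACC/BA_RJT choice out of the IOCB unions and into ctiocb->abort_rctl plus XMIT_BLS_RSP64 WQE bit fields. A minimal userspace sketch of the decision it encodes (types and names are hypothetical, not the driver's API): default to BA_ACC, downgrade to BA_RJT when the exchange responder presents an XRI beyond what we own or when the partially assembled sequence could not be aborted.

#include <stdbool.h>
#include <stdio.h>

enum bls_rctl { BLS_BA_ACC, BLS_BA_RJT };

/* Condensed model of the logic above: accept unless the exchange
 * responder's XRI is out of range or the abort itself failed. */
static enum bls_rctl bls_response(bool sent_by_responder, unsigned int lxri,
				  unsigned int iocb_cnt, bool aborted)
{
	if (sent_by_responder && lxri > iocb_cnt)
		return BLS_BA_RJT;
	if (!aborted)
		return BLS_BA_RJT;
	return BLS_BA_ACC;
}

int main(void)
{
	printf("%d\n", bls_response(true, 10, 4, true));	/* BA_RJT */
	printf("%d\n", bls_response(false, 0, 4, true));	/* BA_ACC */
	return 0;
}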
@@ -19142,7 +19067,6 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
struct fc_frame_header *fc_hdr;
uint32_t sid;
uint32_t len, tot_len;
- struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
@@ -19155,40 +19079,40 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
/* Initialize the first IOCB. */
- first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
- first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+ first_iocbq->wcqe_cmpl.total_data_placed = 0;
+ bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
+ IOSTAT_SUCCESS);
first_iocbq->vport = vport;
/* Check FC Header to see what TYPE of frame we are rcv'ing */
if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
- first_iocbq->iocb.un.rcvels.parmRo =
- sli4_did_from_fc_hdr(fc_hdr);
- first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
- } else
- first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
- first_iocbq->iocb.ulpContext = NO_XRI;
- first_iocbq->iocb.unsli3.rcvsli3.ox_id =
- be16_to_cpu(fc_hdr->fh_ox_id);
- /* iocbq is prepped for internal consumption. Physical vpi. */
- first_iocbq->iocb.unsli3.rcvsli3.vpi =
- vport->phba->vpi_ids[vport->vpi];
- /* put the first buffer into the first IOCBq */
+ bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
+ sli4_did_from_fc_hdr(fc_hdr));
+ }
+
+ bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
+ NO_XRI);
+ bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
+ be16_to_cpu(fc_hdr->fh_ox_id));
+
+ /* put the first buffer into the first iocb */
tot_len = bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
+ first_iocbq->bpl_dmabuf = NULL;
+ /* Keep track of the BDE count */
+ first_iocbq->wcqe_cmpl.word3 = 1;
- first_iocbq->context2 = &seq_dmabuf->dbuf;
- first_iocbq->context3 = NULL;
- first_iocbq->iocb.ulpBdeCount = 1;
if (tot_len > LPFC_DATA_BUF_SIZE)
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
else
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
-
- first_iocbq->iocb.un.rcvels.remoteID = sid;
+ first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+ first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
+ bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
+ sid);
}
iocbq = first_iocbq;
/*
@@ -19200,30 +19124,25 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
- if (!iocbq->context3) {
- iocbq->context3 = d_buf;
- iocbq->iocb.ulpBdeCount++;
+ if (!iocbq->bpl_dmabuf) {
+ iocbq->bpl_dmabuf = d_buf;
+ iocbq->wcqe_cmpl.word3++;
/* We need to get the size out of the right CQE */
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
- pbde = (struct ulp_bde64 *)
- &iocbq->iocb.unsli3.sli3Words[4];
- if (len > LPFC_DATA_BUF_SIZE)
- pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
- else
- pbde->tus.f.bdeSize = len;
-
- iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+ iocbq->unsol_rcv_len = len;
+ iocbq->wcqe_cmpl.total_data_placed += len;
tot_len += len;
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
if (first_iocbq) {
- first_iocbq->iocb.ulpStatus =
- IOSTAT_FCP_RSP_ERROR;
- first_iocbq->iocb.un.ulpWord[4] =
- IOERR_NO_RESOURCES;
+ bf_set(lpfc_wcqe_c_status,
+ &first_iocbq->wcqe_cmpl,
+ IOSTAT_FCP_RSP_ERROR);
+ first_iocbq->wcqe_cmpl.parameter =
+ IOERR_NO_RESOURCES;
}
lpfc_in_buf_free(vport->phba, d_buf);
continue;
@@ -19232,19 +19151,21 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
- iocbq->context2 = d_buf;
- iocbq->context3 = NULL;
- iocbq->iocb.ulpBdeCount = 1;
+ iocbq->cmd_dmabuf = d_buf;
+ iocbq->bpl_dmabuf = NULL;
+ iocbq->wcqe_cmpl.word3 = 1;
+
if (len > LPFC_DATA_BUF_SIZE)
- iocbq->iocb.un.cont64[0].tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
+ LPFC_DATA_BUF_SIZE;
else
- iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
+ iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
+ len;
tot_len += len;
- iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
-
- iocbq->iocb.un.rcvels.remoteID = sid;
+ iocbq->wcqe_cmpl.total_data_placed = tot_len;
+ bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
+ sid);
list_add_tail(&iocbq->list, &first_iocbq->list);
}
}
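lpfc_prep_seq() now keeps two lengths per sequence: each buffer's BDE size clamped to LPFC_DATA_BUF_SIZE, and wcqe_cmpl.total_data_placed accumulating the raw frame lengths. A standalone sketch of that bookkeeping, with invented buffer sizes and a stand-in for LPFC_DATA_BUF_SIZE:

#include <stdio.h>

#define DATA_BUF_SIZE 2048	/* stand-in for LPFC_DATA_BUF_SIZE */

int main(void)
{
	unsigned int lens[] = { 2048, 2048, 512 };	/* invented frames */
	unsigned int tot_len = 0;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int bde = lens[i] > DATA_BUF_SIZE ?
				   DATA_BUF_SIZE : lens[i];

		tot_len += lens[i];	/* running total_data_placed */
		printf("bde %u: size %u, total_data_placed %u\n",
		       i, bde, tot_len);
	}
	return 0;
}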
@@ -19276,16 +19197,18 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
if (!lpfc_complete_unsol_iocb(phba,
phba->sli4_hba.els_wq->pring,
iocbq, fc_hdr->fh_r_ctl,
- fc_hdr->fh_type))
+ fc_hdr->fh_type)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+ lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
+ }
/* Free iocb created in lpfc_prep_seq */
list_for_each_entry_safe(curr_iocb, next_iocb,
- &iocbq->list, list) {
+ &iocbq->list, list) {
list_del_init(&curr_iocb->list);
lpfc_sli_release_iocbq(phba, curr_iocb);
}
@@ -19296,7 +19219,7 @@ static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
- struct lpfc_dmabuf *pcmd = cmdiocb->context2;
+ struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
if (pcmd && pcmd->virt)
dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
@@ -19312,7 +19235,7 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr;
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq = NULL;
- union lpfc_wqe *wqe;
+ union lpfc_wqe128 *pwqe;
struct lpfc_dmabuf *pcmd = NULL;
uint32_t frame_len;
int rc;
@@ -19347,34 +19270,46 @@ lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
/* copyin the payload */
memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
- /* fill in BDE's for command */
- iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
- iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
- iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
- iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
-
- iocbq->context2 = pcmd;
+ iocbq->cmd_dmabuf = pcmd;
iocbq->vport = vport;
- iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
- iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+ iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
+ iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
+ iocbq->num_bdes = 0;
+
+ pwqe = &iocbq->wqe;
+ /* fill in BDE's for command */
+ pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
+ pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
+ pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
+ pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+
+ pwqe->send_frame.frame_len = frame_len;
+ pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
+ pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
+ pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
+ pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
+ pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
+ pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
+
+ pwqe->generic.wqe_com.word7 = 0;
+ pwqe->generic.wqe_com.word10 = 0;
+
+ bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
+ bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
+ bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
+ bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
+ bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
+ bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
+ bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
+ bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
+ pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
+
+ iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
- /*
- * Setup rest of the iocb as though it were a WQE
- * Build the SEND_FRAME WQE
- */
- wqe = (union lpfc_wqe *)&iocbq->iocb;
-
- wqe->send_frame.frame_len = frame_len;
- wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
- wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
- wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
- wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
- wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
- wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
-
- iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
- iocbq->iocb.ulpLe = 1;
- iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
if (rc == IOCB_ERROR)
goto exit;
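The SEND_FRAME fill-in above loads the 24-byte FC header into six CPU-order WQE words, swapping each from wire (big-endian) order. The same move in portable userspace C, with an invented header value:

#include <arpa/inet.h>	/* ntohl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t fc_hdr[24] = { 0x22, 0x00, 0x01, 0x02 };	/* wire order */
	uint32_t wd[6];
	int i;

	/* Same move as fc_hdr_wd0..wd5 above: read each big-endian
	 * header word and store it in CPU order. */
	for (i = 0; i < 6; i++) {
		uint32_t be;

		memcpy(&be, fc_hdr + 4 * i, 4);
		wd[i] = ntohl(be);
	}
	printf("wd0 = 0x%08x\n", wd[0]);	/* 0x22000102 */
	return 0;
}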
@@ -19668,7 +19603,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
@@ -20311,8 +20246,8 @@ next_priority:
* have been tested so that we can detect when we should
* change the priority level.
*/
- next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
- LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
}
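Both hunks above substitute find_first_zero_bit()/find_first_bit() for the find_next_* forms with a start offset of 0; the semantics are identical, the first-bit helpers are simply the direct spelling. A userspace stand-in for the zero-bit variant, assuming the usual kernel bitmap layout of bit i living in word i / BITS_PER_LONG:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Userspace stand-in: index of the first clear bit, or size if all
 * bits below size are set. */
static unsigned long first_zero_bit(const unsigned long *map,
				    unsigned long size)
{
	unsigned long i;

	for (i = 0; i < size; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return size;
}

int main(void)
{
	unsigned long rpi_bmask[1] = { 0x0fUL };	/* RPIs 0-3 in use */

	printf("next free rpi: %lu\n", first_zero_bit(rpi_bmask, 64)); /* 4 */
	return 0;
}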
@@ -20654,11 +20589,7 @@ lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
}
lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
- mempool_free(mboxq, phba->mbox_mem_pool);
- if (mp) {
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
+ lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
return data_length;
}
@@ -20973,7 +20904,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *mb, *nextmb;
- struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ndlp;
struct lpfc_nodelist *act_mbx_ndlp = NULL;
LIST_HEAD(mbox_cmd_list);
@@ -20999,8 +20929,12 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
- /* Put reference count for delayed processing */
+
+ /* This reference is local to this routine. The
+ * reference is removed at routine exit.
+ */
act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
+
/* Unregister the RPI when mailbox complete */
mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
}
@@ -21043,12 +20977,6 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
while (!list_empty(&mbox_cmd_list)) {
list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
- mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
- if (mp) {
- __lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- }
- mb->ctx_buf = NULL;
ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
mb->ctx_ndlp = NULL;
if (ndlp) {
@@ -21058,7 +20986,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
lpfc_nlp_put(ndlp);
}
}
- mempool_free(mb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
}
/* Release the ndlp with the cleaned-up active mailbox command */
@@ -21089,10 +21017,9 @@ lpfc_drain_txq(struct lpfc_hba *phba)
struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
- struct lpfc_sglq *sglq;
- union lpfc_wqe128 wqe;
uint32_t txq_cnt = 0;
struct lpfc_queue *wq;
+ int ret = 0;
if (phba->link_flag & LS_MDS_LOOPBACK) {
/* MDS WQE are posted only to first WQ*/
@@ -21131,44 +21058,33 @@ lpfc_drain_txq(struct lpfc_hba *phba)
txq_cnt);
break;
}
- sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
- if (!sglq) {
- __lpfc_sli_ringtx_put(phba, pring, piocbq);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
- break;
- }
txq_cnt--;
- /* The xri and iocb resources secured,
- * attempt to issue request
- */
- piocbq->sli4_lxritag = sglq->sli4_lxritag;
- piocbq->sli4_xritag = sglq->sli4_xritag;
- if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
- fail_msg = "to convert bpl to sgl";
- else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
- fail_msg = "to convert iocb to wqe";
- else if (lpfc_sli4_wq_put(wq, &wqe))
- fail_msg = " - Wq is full";
- else
- lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
+ ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
+ if (ret && ret != IOCB_BUSY) {
+ fail_msg = " - Cannot send IO ";
+ piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
+ }
if (fail_msg) {
+ piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
/* Failed means we can't issue and need to cancel */
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"2822 IOCB failed %s iotag 0x%x "
- "xri 0x%x\n",
- fail_msg,
- piocbq->iotag, piocbq->sli4_xritag);
+ "xri 0x%x %d flg x%x\n",
+ fail_msg, piocbq->iotag,
+ piocbq->sli4_xritag, ret,
+ piocbq->cmd_flag);
list_add_tail(&piocbq->list, &completions);
fail_msg = NULL;
}
spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ if (txq_cnt == 0 || ret == IOCB_BUSY)
+ break;
}
-
/* Cancel all the IOCBs that cannot be issued */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ IOERR_SLI_ABORTED);
return txq_cnt;
}
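The reworked lpfc_drain_txq() loop leans entirely on __lpfc_sli_issue_iocb(): drain until the queue empties or the ring reports busy, and cancel anything that failed outright. A schematic model of that control flow (issue() is an invented stand-in, not a driver call):

#include <stdio.h>

enum { IO_OK, IO_BUSY, IO_ERROR };

/* Hypothetical stand-in for __lpfc_sli_issue_iocb(): the fourth
 * submission reports busy. */
static int issue(int i)
{
	return i == 3 ? IO_BUSY : IO_OK;
}

int main(void)
{
	int txq_cnt = 6, i = 0, cancelled = 0;

	while (txq_cnt) {
		int ret = issue(i++);

		txq_cnt--;
		if (ret && ret != IO_BUSY)
			cancelled++;	/* complete with LOCAL_REJECT later */
		if (txq_cnt == 0 || ret == IO_BUSY)
			break;		/* leave the rest for the next pass */
	}
	printf("left %d, cancelled %d\n", txq_cnt, cancelled);
	return 0;
}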
@@ -21216,14 +21132,14 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
if (cmd == CMD_XMIT_BLS_RSP64_WQE)
return sglq->sli4_xritag;
- numBdes = pwqeq->rsvd2;
+ numBdes = pwqeq->num_bdes;
if (numBdes) {
/* The addrHigh and addrLow fields within the WQE
* have not been byteswapped yet so there is no
* need to swap them back.
*/
- if (pwqeq->context3)
- dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
+ if (pwqeq->bpl_dmabuf)
+ dmabuf = pwqeq->bpl_dmabuf;
else
return xritag;
@@ -21317,7 +21233,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
uint32_t ret = 0;
/* NVME_LS and NVME_LS ABTS requests. */
- if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
+ if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
pring = phba->sli4_hba.nvmels_wq->pring;
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
qp, wq_access);
@@ -21348,7 +21264,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVME_FCREQ and NVME_ABTS requests */
- if (pwqe->iocb_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
+ if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
@@ -21370,12 +21286,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
}
/* NVMET requests */
- if (pwqe->iocb_flag & LPFC_IO_NVMET) {
+ if (pwqe->cmd_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
wq = qp->io_wq;
pring = wq->pring;
- ctxp = pwqe->context2;
+ ctxp = pwqe->context_un.axchg;
sglq = ctxp->ctxbuf->sglq;
if (pwqe->sli4_xritag == NO_XRI) {
pwqe->sli4_lxritag = sglq->sli4_lxritag;
@@ -21436,12 +21352,12 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return WQE_NORESOURCE;
/* Indicate the IO is being aborted by the driver. */
- cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
abtswqe = &abtsiocb->wqe;
memset(abtswqe, 0, sizeof(*abtswqe));
- if (!lpfc_is_link_up(phba))
+ if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
abtswqe->abort_cmd.rsrvd5 = 0;
@@ -21455,15 +21371,15 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
- abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
- if (cmdiocb->iocb_flag & LPFC_IO_FCP)
- abtsiocb->iocb_flag |= LPFC_IO_FCP;
- if (cmdiocb->iocb_flag & LPFC_IO_NVME)
- abtsiocb->iocb_flag |= LPFC_IO_NVME;
- if (cmdiocb->iocb_flag & LPFC_IO_FOF)
- abtsiocb->iocb_flag |= LPFC_IO_FOF;
+ abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
+ if (cmdiocb->cmd_flag & LPFC_IO_FCP)
+ abtsiocb->cmd_flag |= LPFC_IO_FCP;
+ if (cmdiocb->cmd_flag & LPFC_IO_NVME)
+ abtsiocb->cmd_flag |= LPFC_IO_NVME;
+ if (cmdiocb->cmd_flag & LPFC_IO_FOF)
+ abtsiocb->cmd_flag |= LPFC_IO_FOF;
abtsiocb->vport = vport;
- abtsiocb->wqe_cmpl = cmpl;
+ abtsiocb->cmd_cmpl = cmpl;
lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
@@ -21474,7 +21390,7 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
if (retval) {
- cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
__lpfc_sli_release_iocbq(phba, abtsiocb);
}
@@ -21836,8 +21752,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
/* MUST zero fields if buffer is reused by another protocol */
lpfc_ncmd->nvmeCmd = NULL;
- lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
- lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
+ lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
if (phba->cfg_xpsgl && !phba->nvmet_support &&
!list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
@@ -22218,7 +22133,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->ctx_buf = NULL;
mbox->ctx_ndlp = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
@@ -22255,9 +22169,12 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
}
exit:
+ /* This is an embedded SLI4 mailbox with an external buffer allocated.
+ * Free the pcmd and then cleanup with the correct routine.
+ */
lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
kfree(pcmd);
- mempool_free(mbox, phba->mbox_mem_pool);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
return byte_cnt;
}
@@ -22449,7 +22366,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
return NULL;
}
- tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
+ tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
GFP_ATOMIC,
&tmp->fcp_cmd_rsp_dma_handle);
@@ -22551,3 +22468,180 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
+
+/**
+ * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
+ * @phba: phba object
+ * @job: job entry of the command to be posted.
+ *
+ * Fill the common fields of the wqe for each of the command.
+ *
+ * Return codes:
+ * None
+ **/
+void
+lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
+{
+ u8 cmnd;
+ u32 *pcmd;
+ u32 if_type = 0;
+ u32 fip, abort_tag;
+ struct lpfc_nodelist *ndlp = NULL;
+ union lpfc_wqe128 *wqe = &job->wqe;
+ u8 command_type = ELS_COMMAND_NON_FIP;
+
+ fip = phba->hba_flag & HBA_FIP_SUPPORT;
+ /* The fcp commands will set command type */
+ if (job->cmd_flag & LPFC_IO_FCP)
+ command_type = FCP_COMMAND;
+ else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
+ command_type = ELS_COMMAND_FIP;
+ else
+ command_type = ELS_COMMAND_NON_FIP;
+
+ abort_tag = job->iotag;
+ cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
+
+ switch (cmnd) {
+ case CMD_ELS_REQUEST64_WQE:
+ ndlp = job->ndlp;
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+ pcmd = (u32 *)job->cmd_dmabuf->virt;
+ if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+ *pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_RDF ||
+ *pcmd == ELS_CMD_EDC ||
+ *pcmd == ELS_CMD_RSCN_XMT ||
+ *pcmd == ELS_CMD_FDISC ||
+ *pcmd == ELS_CMD_LOGO ||
+ *pcmd == ELS_CMD_QFPA ||
+ *pcmd == ELS_CMD_UVEM ||
+ *pcmd == ELS_CMD_PLOGI)) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ job->vport->fc_myDID);
+
+ if ((*pcmd == ELS_CMD_FLOGI) &&
+ !(phba->fc_topology ==
+ LPFC_TOPOLOGY_LOOP))
+ bf_set(els_req64_sid, &wqe->els_req, 0);
+
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[job->vport->vpi]);
+ } else if (pcmd) {
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ }
+ }
+
+ bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+ bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
+ break;
+ case CMD_XMIT_ELS_RSP64_WQE:
+ ndlp = job->ndlp;
+
+ /* word4 */
+ wqe->xmit_els_rsp.word4 = 0;
+
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
+ if (job->vport->fc_flag & FC_PT2PT) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ job->vport->fc_myDID);
+ if (job->vport->fc_myDID == Fabric_DID) {
+ bf_set(wqe_els_did,
+ &wqe->xmit_els_rsp.wqe_dest, 0);
+ }
+ }
+ }
+
+ bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
+
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
+ bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
+ job->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
+ }
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+
+ if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
+ bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
+ phba->vpi_ids[job->vport->vpi]);
+ }
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_GEN_REQUEST64_WQE:
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_SEQUENCE64_WQE:
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+
+ wqe->xmit_sequence.rsvd3 = 0;
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_XMIT_BLS_RSP64_WQE:
+ bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
+ bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+ bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ /* Overwrite the pre-set command type with OTHER_COMMAND */
+ command_type = OTHER_COMMAND;
+ break;
+ case CMD_FCP_ICMND64_WQE: /* task mgmt commands */
+ case CMD_ABORT_XRI_WQE: /* abort iotag */
+ case CMD_SEND_FRAME: /* mds loopback */
+ /* cases already formatted for sli4 wqe - no chgs necessary */
+ return;
+ default:
+ dump_stack();
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "6207 Invalid command 0x%x\n",
+ cmnd);
+ break;
+ }
+
+ wqe->generic.wqe_com.abort_tag = abort_tag;
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+}
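lpfc_sli_prep_wqe() programs every WQE word through bf_set(), the driver's shift-and-mask accessor driven by <field>_SHIFT/<field>_MASK defines in lpfc_hw4.h. A minimal model of the convention against a single 32-bit word; the real macro addresses a word inside the named structure, and the field values here are invented:

#include <stdint.h>
#include <stdio.h>

/* Invented field description in the driver's naming convention:
 * <field>_SHIFT and <field>_MASK select bits within one word. */
#define wqe_cmnd_SHIFT	0
#define wqe_cmnd_MASK	0x000000ff

#define BF_SET(name, wordp, val)					  \
	(*(wordp) = (*(wordp) & ~((uint32_t)name##_MASK << name##_SHIFT)) \
		    | (((uint32_t)(val) & name##_MASK) << name##_SHIFT))
#define BF_GET(name, wordp)						  \
	((*(wordp) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	uint32_t word7 = 0;

	BF_SET(wqe_cmnd, &word7, 0x9a);	/* invented opcode value */
	printf("cmnd 0x%x, word7 0x%08x\n", BF_GET(wqe_cmnd, &word7), word7);
	return 0;
}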
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 5161ccacea3e..cd33dfec758c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -35,7 +35,13 @@ typedef enum _lpfc_ctx_cmd {
LPFC_CTX_HOST
} lpfc_ctx_cmd;
-union lpfc_vmid_iocb_tag {
+/* Enumeration to describe the thread lock context. */
+enum lpfc_mbox_ctx {
+ MBOX_THD_UNLOCKED,
+ MBOX_THD_LOCKED
+};
+
+union lpfc_vmid_tag {
uint32_t app_id;
uint8_t cs_ctl_vmid;
struct lpfc_vmid_context *vmid_context; /* UVEM context information */
@@ -69,16 +75,25 @@ struct lpfc_iocbq {
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
uint16_t hba_wqidx; /* index to HBA work queue */
struct lpfc_cq_event cq_event;
- struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
uint64_t isr_timestamp;
union lpfc_wqe128 wqe; /* SLI-4 */
IOCB_t iocb; /* SLI-3 */
+ struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
- uint8_t rsvd2;
- uint8_t priority; /* OAS priority */
- uint8_t retry; /* retry counter for IOCB cmd - if needed */
- uint32_t iocb_flag;
+ u32 unsol_rcv_len; /* Receive len in unsol path */
+
+ /* Pack the u8's together and pad them to a multiple of 4. */
+ u8 num_bdes; /* Number of BDEs */
+ u8 abort_bls; /* ABTS by initiator or responder */
+ u8 abort_rctl; /* ACC or RJT flag */
+ u8 priority; /* OAS priority */
+ u8 retry; /* retry counter for IOCB cmd - if needed */
+ u8 rsvd1; /* Pad for u32 */
+ u8 rsvd2; /* Pad for u32 */
+ u8 rsvd3; /* Pad for u32 */
+
+ u32 cmd_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
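The packing comment above is an alignment argument: eight one-byte members sitting between two u32 fields total a multiple of four, so the compiler inserts no holes and member offsets stay stable. A compile-time check of that claim on a lookalike layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Lookalike of the reworked layout: eight u8 members between two u32
 * fields, a multiple of 4, so no implicit padding is needed. */
struct iocbq_like {
	uint32_t unsol_rcv_len;
	uint8_t num_bdes, abort_bls, abort_rctl, priority;
	uint8_t retry, rsvd1, rsvd2, rsvd3;
	uint32_t cmd_flag;
};

static_assert(sizeof(struct iocbq_like) == 16, "no compiler-inserted holes");

int main(void)
{
	printf("sizeof = %zu\n", sizeof(struct iocbq_like));
	return 0;
}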
@@ -111,27 +126,29 @@ struct lpfc_iocbq {
uint32_t drvrTimeout; /* driver timeout in seconds */
struct lpfc_vport *vport;/* virtual port pointer */
- void *context1; /* caller context information */
- void *context2; /* caller context information */
- void *context3; /* caller context information */
+ struct lpfc_dmabuf *cmd_dmabuf;
+ struct lpfc_dmabuf *rsp_dmabuf;
+ struct lpfc_dmabuf *bpl_dmabuf;
uint32_t event_tag; /* LA Event tag */
union {
wait_queue_head_t *wait_queue;
- struct lpfc_iocbq *rsp_iocb;
struct lpfcMboxq *mbox;
- struct lpfc_nodelist *ndlp;
struct lpfc_node_rrq *rrq;
+ struct nvmefc_ls_req *nvme_lsreq;
+ struct lpfc_async_xchg_ctx *axchg;
+ struct bsg_job_data *dd_data;
} context_un;
- union lpfc_vmid_iocb_tag vmid_tag;
- void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_iocbq *);
- void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
- struct lpfc_wcqe_complete *);
+ struct lpfc_io_buf *io_buf;
+ struct lpfc_iocbq *rsp_iocb;
+ struct lpfc_nodelist *ndlp;
+ union lpfc_vmid_tag vmid_tag;
+ void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
+ void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
+ void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
+ struct lpfc_iocbq *rsp);
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -338,7 +355,6 @@ struct lpfc_sli {
#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */
#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
-#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 5962cf508842..cbb1aa1cf025 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -489,7 +489,7 @@ struct lpfc_hba;
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
- uint16_t irq;
+ int irq;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
struct lpfc_queue *eq;
@@ -611,6 +611,8 @@ struct lpfc_vector_map_info {
};
#define LPFC_VECTOR_MAP_EMPTY 0xffff
+#define LPFC_IRQ_EMPTY 0xffffffff
+
/* Multi-XRI pool */
#define XRI_BATCH 8
@@ -981,6 +983,9 @@ struct lpfc_sli4_hba {
#define lpfc_conf_trunk_port3_nd_MASK 0x1
uint8_t flash_id;
uint8_t asic_rev;
+ uint16_t fawwpn_flag; /* FA-WWPN support state */
+#define LPFC_FAWWPN_CONFIG 0x1 /* FA-PWWN is configured */
+#define LPFC_FAWWPN_FABRIC 0x2 /* FA-PWWN success with Fabric */
};
enum lpfc_sge_type {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5a4d3b24fbce..192d5630a44d 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.0.0.3"
+#define LPFC_DRIVER_VERSION "14.2.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2021 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2022 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
new file mode 100644
index 000000000000..ed1d7f7b88a3
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -0,0 +1,286 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/dma-direction.h>
+
+#include <scsi/scsi_transport_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_crtn.h"
+
+
+/*
+ * lpfc_get_vmid_from_hashtable - search the UUID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @buf: uuid associated with the VE
+ * Return the VMID entry associated with the UUID
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
+ u32 hash, u8 *buf)
+{
+ struct lpfc_vmid *vmp;
+
+ hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
+ if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
+ return vmp;
+ }
+ return NULL;
+}
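The lookup above walks one hash bucket and matches the full 16-byte UUID with memcmp(). A self-contained model using a plain singly linked bucket in place of the kernel's hash_for_each_possible():

#include <stdio.h>
#include <string.h>

struct vmid {
	unsigned char host_vmid[16];
	struct vmid *next;	/* plain chain in place of the hlist node */
};

/* Walk one bucket and match the full 16-byte UUID, as the driver does
 * with hash_for_each_possible() + memcmp(). */
static struct vmid *bucket_lookup(struct vmid *bucket,
				  const unsigned char *uuid)
{
	struct vmid *v;

	for (v = bucket; v; v = v->next)
		if (!memcmp(v->host_vmid, uuid, 16))
			return v;
	return NULL;
}

int main(void)
{
	struct vmid a = { "0123456789abcdef", NULL };
	const unsigned char *key = (const unsigned char *)"0123456789abcdef";

	printf("%s\n", bucket_lookup(&a, key) ? "hit" : "miss");
	return 0;
}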
+
+/*
+ * lpfc_put_vmid_in_hashtable - put the VMID in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @hash: calculated hash value
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ *
+ * This routine will insert the newly acquired VMID entity in the hash table.
+ * Make sure to acquire the appropriate lock before invoking this routine.
+ */
+static void
+lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
+ struct lpfc_vmid *vmp)
+{
+ hash_add(vport->hash_table, &vmp->hnode, hash);
+}
+
+/*
+ * lpfc_vmid_hash_fn - create a hash value of the UUID
+ * @vmid: uuid associated with the VE
+ * @len: length of the VMID string
+ * Returns the calculated hash value
+ */
+int lpfc_vmid_hash_fn(const char *vmid, int len)
+{
+ int c;
+ int hash = 0;
+
+ if (len == 0)
+ return 0;
+ while (len--) {
+ c = *vmid++;
+ if (c >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+
+ hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
+ (c >> LPFC_VMID_HASH_SHIFT)) * 19;
+ }
+
+ return hash & LPFC_VMID_HASH_MASK;
+}
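lpfc_vmid_hash_fn() lowercases each byte, folds it in at two shifts, multiplies by 19, and masks down to the table size. Reproduced standalone below; the shift and mask constants are assumptions standing in for LPFC_VMID_HASH_SHIFT/LPFC_VMID_HASH_MASK, whose values are not shown in this patch:

#include <stdio.h>

#define VMID_HASH_SHIFT	8	/* assumed LPFC_VMID_HASH_SHIFT */
#define VMID_HASH_MASK	0xff	/* assumed LPFC_VMID_HASH_MASK */

/* Same structure as lpfc_vmid_hash_fn() above. */
static int vmid_hash(const char *vmid, int len)
{
	int hash = 0;

	while (len--) {
		int c = *vmid++;

		if (c >= 'A' && c <= 'Z')
			c += 'a' - 'A';
		hash = (hash + (c << VMID_HASH_SHIFT) +
			(c >> VMID_HASH_SHIFT)) * 19;
	}
	return hash & VMID_HASH_MASK;
}

int main(void)
{
	const char *uuid = "4fe21bf5-50f4-4f2e";	/* invented UUID */

	printf("bucket %d\n", vmid_hash(uuid, 18));
	return 0;
}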
+
+/*
+ * lpfc_vmid_update_entry - update the vmid entry in the hash table
+ * @vport: The virtual port for which this call is being executed.
+ * @iodir: io direction
+ * @vmp: Pointer to a VMID entry representing a VM sending I/O
+ * @tag: VMID tag
+ */
+static void lpfc_vmid_update_entry(struct lpfc_vport *vport,
+ enum dma_data_direction iodir,
+ struct lpfc_vmid *vmp,
+ union lpfc_vmid_io_tag *tag)
+{
+ u64 *lta;
+
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
+ else if (vport->phba->cfg_vmid_app_header)
+ tag->app_id = vmp->un.app_id;
+
+ if (iodir == DMA_TO_DEVICE)
+ vmp->io_wr_cnt++;
+ else if (iodir == DMA_FROM_DEVICE)
+ vmp->io_rd_cnt++;
+
+ /* update the last access timestamp in the table */
+ lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
+ *lta = jiffies;
+}
+
+static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
+ struct lpfc_vmid *vmid)
+{
+ u32 hash;
+ struct lpfc_vmid *pvmid;
+
+ if (vport->port_type == LPFC_PHYSICAL_PORT) {
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ } else {
+ hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
+ pvmid =
+ lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
+ vmid->host_vmid);
+ if (pvmid)
+ vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
+ else
+ vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
+ }
+}
+
+/*
+ * lpfc_vmid_get_appid - get the VMID associated with the UUID
+ * @vport: The virtual port for which this call is being executed.
+ * @uuid: UUID associated with the VE
+ * @iodir: io direction
+ * @tag: VMID tag
+ * Return: 0 on success, negative error code on failure
+ */
+int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
+ enum dma_data_direction iodir,
+ union lpfc_vmid_io_tag *tag)
+{
+ struct lpfc_vmid *vmp = NULL;
+ int hash, len, rc = -EPERM, i;
+
+ /* check if QFPA is complete */
+ if (lpfc_vmid_is_type_priority_tag(vport) &&
+ !(vport->vmid_flag & LPFC_VMID_QFPA_CMPL) &&
+ (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA)) {
+ vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
+ return -EAGAIN;
+ }
+
+ /* search if the UUID has already been mapped to the VMID */
+ len = strlen(uuid);
+ hash = lpfc_vmid_hash_fn(uuid, len);
+
+ /* search for the VMID in the table */
+ read_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* if found, check if it's already registered */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ read_unlock(&vport->vmid_lock);
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ rc = 0;
+ } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
+ vmp->flag & LPFC_VMID_DE_REGISTER)) {
+ /* else if register or dereg request has already been sent */
+ /* Hence VMID tag will not be added for this I/O */
+ read_unlock(&vport->vmid_lock);
+ rc = -EBUSY;
+ } else {
+ /* The VMID was not found in the hashtable. At this point, */
+ /* drop the read lock first before proceeding further */
+ read_unlock(&vport->vmid_lock);
+ /* start the process to obtain one as per the */
+ /* type of the VMID indicated */
+ write_lock(&vport->vmid_lock);
+ vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);
+
+ /* re-check under the write lock, in case the entry was added */
+ /* by another context while the read lock was dropped */
+ if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
+ lpfc_vmid_update_entry(vport, iodir, vmp, tag);
+ write_unlock(&vport->vmid_lock);
+ return 0;
+ } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
+ write_unlock(&vport->vmid_lock);
+ return -EBUSY;
+ }
+
+ /* else search and allocate a free slot in the hash table */
+ if (vport->cur_vmid_cnt < vport->max_vmid) {
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = vport->vmid + i;
+ if (vmp->flag == LPFC_VMID_SLOT_FREE)
+ break;
+ }
+ if (i == vport->max_vmid)
+ vmp = NULL;
+ } else {
+ vmp = NULL;
+ }
+
+ if (!vmp) {
+ write_unlock(&vport->vmid_lock);
+ return -ENOMEM;
+ }
+
+ /* Add the vmid and register */
+ lpfc_put_vmid_in_hashtable(vport, hash, vmp);
+ vmp->vmid_len = len;
+ memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+ vmp->flag = LPFC_VMID_SLOT_USED;
+
+ vmp->delete_inactive =
+ vport->vmid_inactivity_timeout ? 1 : 0;
+
+ /* if type priority tag, get next available VMID */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ lpfc_vmid_assign_cs_ctl(vport, vmp);
+
+ /* allocate the per cpu variable for holding */
+ /* the last access time stamp only if VMID is enabled */
+ if (!vmp->last_io_time)
+ vmp->last_io_time = alloc_percpu_gfp(u64, GFP_ATOMIC);
+ if (!vmp->last_io_time) {
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ write_unlock(&vport->vmid_lock);
+
+ /* complete transaction with switch */
+ if (vport->phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO)
+ rc = lpfc_vmid_uvem(vport, vmp, true);
+ else if (vport->phba->cfg_vmid_app_header)
+ rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
+ if (!rc) {
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt++;
+ vmp->flag |= LPFC_VMID_REQ_REGISTER;
+ write_unlock(&vport->vmid_lock);
+ } else {
+ write_lock(&vport->vmid_lock);
+ hash_del(&vmp->hnode);
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ free_percpu(vmp->last_io_time);
+ write_unlock(&vport->vmid_lock);
+ return -EIO;
+ }
+
+ /* finally, enable the idle timer once */
+ if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
+ mod_timer(&vport->phba->inactive_vmid_poll,
+ jiffies +
+ msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+ vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
+ }
+ }
+ return rc;
+}
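lpfc_vmid_get_appid() uses a classic read-mostly discipline: probe under the read lock, and on a miss retake the table as a writer and re-check before inserting, because another context may have raced in while no lock was held. A pthreads sketch of the same shape, reduced to a single-entry table:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t vmid_lock = PTHREAD_RWLOCK_INITIALIZER;
static char table_entry[32];	/* "" means the slot is free */

/* Probe under the read lock; on a miss, retake as writer and re-check,
 * since another thread may have inserted while no lock was held. */
static int get_or_insert(const char *key)
{
	int hit;

	pthread_rwlock_rdlock(&vmid_lock);
	hit = !strcmp(table_entry, key);
	pthread_rwlock_unlock(&vmid_lock);
	if (hit)
		return 0;

	pthread_rwlock_wrlock(&vmid_lock);
	if (strcmp(table_entry, key))	/* still absent? */
		snprintf(table_entry, sizeof(table_entry), "%s", key);
	pthread_rwlock_unlock(&vmid_lock);
	return 1;
}

int main(void)
{
	int first = get_or_insert("uuid-a");
	int second = get_or_insert("uuid-a");

	printf("%d %d\n", first, second);	/* 1 0 */
	return 0;
}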
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index da9a1f72d938..4d171f5c213f 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -135,12 +135,14 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
}
/*
- * Grab buffer pointer and clear context1 so we can use
- * lpfc_sli_issue_box_wait
+ * Wait for the read_sparams mailbox to complete. Driver needs
+ * this per vport to start the FDISC. If the mailbox fails,
+ * just cleanup and return an error unless the failure is a
+ * mailbox timeout. For MBX_TIMEOUT, allow the default
+ * mbox completion handler to take care of the cleanup. This
+ * is safe as the mailbox command isn't one that triggers
+ * another mailbox.
*/
- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
- pmb->ctx_buf = NULL;
-
pmb->vport = vport;
rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
if (rc != MBX_SUCCESS) {
@@ -148,34 +150,29 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1830 Signal aborted mbxCmd x%x\n",
mb->mbxCommand);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
if (rc != MBX_TIMEOUT)
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb,
+ MBOX_THD_UNLOCKED);
return -EINTR;
} else {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1818 VPort failed init, mbxCmd x%x "
"READ_SPARM mbxStatus x%x, rc = x%x\n",
mb->mbxCommand, mb->mbxStatus, rc);
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
if (rc != MBX_TIMEOUT)
- mempool_free(pmb, phba->mbox_mem_pool);
+ lpfc_mbox_rsrc_cleanup(phba, pmb,
+ MBOX_THD_UNLOCKED);
return -EIO;
}
}
+ mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof (struct lpfc_name));
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof (struct lpfc_name));
-
- lpfc_mbuf_free(phba, mp->virt, mp->phys);
- kfree(mp);
- mempool_free(pmb, phba->mbox_mem_pool);
-
+ lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
return 0;
}
@@ -486,22 +483,67 @@ error_out:
}
static int
+lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ int rc;
+ struct lpfc_hba *phba = vport->phba;
+
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
+ !ndlp->logo_waitq) {
+ ndlp->logo_waitq = &waitq;
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ }
+ spin_unlock_irq(&ndlp->lock);
+ rc = lpfc_issue_els_npiv_logo(vport, ndlp);
+ if (!rc) {
+ wait_event_timeout(waitq,
+ (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(phba->fc_ratov * 2000));
+
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
+ goto logo_cmpl;
+ /* LOGO wait failed. Correct status. */
+ rc = -EINTR;
+ } else {
+ rc = -EIO;
+ }
+
+ /* Error - clean up node flags. */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+
+ logo_cmpl:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+ "1824 Issue LOGO completes with status %d\n",
+ rc);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->logo_waitq = NULL;
+ spin_unlock_irq(&ndlp->lock);
+ return rc;
+}
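lpfc_send_npiv_logo() blocks on a wait queue until the LOGO completion clears NLP_WAIT_FOR_LOGO or 2 * RA_TOV elapses. The equivalent wait/flag protocol in userspace, sketched with a condition variable and pthread_cond_timedwait():

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t logo_cv = PTHREAD_COND_INITIALIZER;
static bool wait_for_logo = true;

/* Completion side: clear the flag and wake the sleeping issuer. */
static void *logo_cmpl(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	wait_for_logo = false;
	pthread_cond_signal(&logo_cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;	/* stand-in for 2 * RA_TOV */

	pthread_create(&t, NULL, logo_cmpl, NULL);
	pthread_mutex_lock(&lock);
	while (wait_for_logo && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&logo_cv, &lock, &deadline);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	printf("LOGO %s\n", rc == ETIMEDOUT ? "timed out" : "completed");
	return 0;
}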
+
+static int
disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
- long timeout;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ /* Can't disable during an outstanding delete. */
+ if (vport->load_flag & FC_UNLOADING)
+ return 0;
+
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (ndlp && phba->link_state >= LPFC_LINK_UP) {
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
- }
+ if (ndlp && phba->link_state >= LPFC_LINK_UP)
+ (void)lpfc_send_npiv_logo(vport, ndlp);
lpfc_sli_host_down(vport);
@@ -600,7 +642,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- long timeout;
+ int rc;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -665,15 +707,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
/* Send DA_ID and wait for a completion. */
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
- while (vport->ct_flags && timeout)
- timeout = schedule_timeout(timeout);
- else
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
+ if (rc) {
lpfc_printf_log(vport->phba, KERN_WARNING,
LOG_VPORT,
"1829 CT command failed to "
- "delete objects on fabric\n");
+ "delete objects on fabric, "
+ "rc %d\n", rc);
+ }
}
/*
@@ -688,11 +729,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
goto skip_logo;
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
+
+ rc = lpfc_send_npiv_logo(vport, ndlp);
+ if (rc)
+ goto skip_logo;
}
if (!(phba->pport->load_flag & FC_UNLOADING))
@@ -769,74 +809,3 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
kfree(vports);
}
-
-/**
- * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
- * @vport: Pointer to vport object.
- *
- * This function resets the statistical data for the vport. This function
- * is called with the host_lock held
- **/
-void
-lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->lat_data)
- memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
- sizeof(struct lpfc_scsicmd_bkt));
- }
-}
-
-
-/**
- * lpfc_alloc_bucket - Allocate data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * This function allocates data buffer required for all the FC
- * nodes of the vport to collect statistical data.
- **/
-void
-lpfc_alloc_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
-
- if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
- ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
- sizeof(struct lpfc_scsicmd_bkt),
- GFP_ATOMIC);
-
- if (!ndlp->lat_data)
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_TRACE_EVENT,
- "0287 lpfc_alloc_bucket failed to "
- "allocate statistical data buffer DID "
- "0x%x\n", ndlp->nlp_DID);
- }
- }
-}
-
-/**
- * lpfc_free_bucket - Free data buffer required for statistical data
- * @vport: Pointer to vport object.
- *
- * Th function frees statistical data buffer of all the FC
- * nodes of the vport.
- **/
-void
-lpfc_free_bucket(struct lpfc_vport *vport)
-{
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
-
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
-
- kfree(ndlp->lat_data);
- ndlp->lat_data = NULL;
- }
-}
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index f4b8528dd2e7..fa60c146c169 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -115,8 +115,4 @@ struct vport_cmd_tag {
void lpfc_vport_set_state(struct lpfc_vport *vport,
enum fc_vport_state new_state);
-void lpfc_vport_reset_stat_data(struct lpfc_vport *);
-void lpfc_alloc_bucket(struct lpfc_vport *);
-void lpfc_free_bucket(struct lpfc_vport *);
-
#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 3976a18f6333..f75928f7773e 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -125,7 +125,6 @@ static void mac53c94_init(struct fsc_state *state)
{
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
- int x;
writeb(state->host->this_id | CF1_PAR_ENABLE, &regs->config1);
writeb(TIMO_VAL(250), &regs->sel_timeout); /* 250ms */
@@ -134,7 +133,7 @@ static void mac53c94_init(struct fsc_state *state)
writeb(0, &regs->config3);
writeb(0, &regs->sync_period);
writeb(0, &regs->sync_offset);
- x = readb(&regs->interrupt);
+ (void)readb(&regs->interrupt);
writel((RUN|PAUSE|FLUSH|WAKE) << 16, &dma->control);
}
@@ -194,7 +193,8 @@ static void mac53c94_interrupt(int irq, void *dev_id)
struct fsc_state *state = (struct fsc_state *) dev_id;
struct mac53c94_regs __iomem *regs = state->regs;
struct dbdma_regs __iomem *dma = state->dma;
- struct scsi_cmnd *cmd = state->current_req;
+ struct scsi_cmnd *const cmd = state->current_req;
+ struct mac53c94_cmd_priv *const mcmd = mac53c94_priv(cmd);
int nb, stat, seq, intr;
static int mac53c94_errors;
@@ -234,7 +234,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
++mac53c94_errors;
writeb(CMD_NOP + CMD_DMA_MODE, &regs->command);
}
- if (cmd == 0) {
+ if (!cmd) {
printk(KERN_DEBUG "53c94: interrupt with no command active?\n");
return;
}
@@ -264,10 +264,10 @@ static void mac53c94_interrupt(int irq, void *dev_id)
/* set DMA controller going if any data to transfer */
if ((stat & (STAT_MSG|STAT_CD)) == 0
&& (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
- nb = cmd->SCp.this_residual;
+ nb = mcmd->this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
- cmd->SCp.this_residual -= nb;
+ mcmd->this_residual -= nb;
writeb(nb, &regs->count_lo);
writeb(nb >> 8, &regs->count_mid);
writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
@@ -294,13 +294,13 @@ static void mac53c94_interrupt(int irq, void *dev_id)
cmd_done(state, DID_ERROR << 16);
return;
}
- if (cmd->SCp.this_residual != 0
+ if (mcmd->this_residual != 0
&& (stat & (STAT_MSG|STAT_CD)) == 0) {
/* Set up the count regs to transfer more */
- nb = cmd->SCp.this_residual;
+ nb = mcmd->this_residual;
if (nb > 0xfff0)
nb = 0xfff0;
- cmd->SCp.this_residual -= nb;
+ mcmd->this_residual -= nb;
writeb(nb, &regs->count_lo);
writeb(nb >> 8, &regs->count_mid);
writeb(CMD_DMA_MODE + CMD_NOP, &regs->command);
@@ -322,8 +322,8 @@ static void mac53c94_interrupt(int irq, void *dev_id)
cmd_done(state, DID_ERROR << 16);
return;
}
- cmd->SCp.Status = readb(&regs->fifo);
- cmd->SCp.Message = readb(&regs->fifo);
+ mcmd->status = readb(&regs->fifo);
+ mcmd->message = readb(&regs->fifo);
writeb(CMD_ACCEPT_MSG, &regs->command);
state->phase = busfreeing;
break;
@@ -331,8 +331,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
if (intr != INTR_DISCONNECT) {
printk(KERN_DEBUG "got intr %x when expected disconnect\n", intr);
}
- cmd_done(state, (DID_OK << 16) + (cmd->SCp.Message << 8)
- + cmd->SCp.Status);
+ cmd_done(state, (DID_OK << 16) + (mcmd->message << 8) + mcmd->status);
break;
default:
printk(KERN_DEBUG "don't know about phase %d\n", state->phase);
@@ -390,7 +389,7 @@ static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
dcmds[-1].command = cpu_to_le16(dma_cmd);
dcmds->command = cpu_to_le16(DBDMA_STOP);
- cmd->SCp.this_residual = total;
+ mac53c94_priv(cmd)->this_residual = total;
}
static struct scsi_host_template mac53c94_template = {
@@ -402,6 +401,7 @@ static struct scsi_host_template mac53c94_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.max_segment_size = 65535,
+ .cmd_size = sizeof(struct mac53c94_cmd_priv),
};
static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *match)
diff --git a/drivers/scsi/mac53c94.h b/drivers/scsi/mac53c94.h
index 5df6e81f78a8..b4093027f9c3 100644
--- a/drivers/scsi/mac53c94.h
+++ b/drivers/scsi/mac53c94.h
@@ -212,4 +212,15 @@ struct mac53c94_regs {
#define CF4_TEST 0x02
#define CF4_BBTE 0x01
+struct mac53c94_cmd_priv {
+ int this_residual;
+ int status;
+ int message;
+};
+
+static inline struct mac53c94_cmd_priv *mac53c94_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#endif /* _MAC53C94_H */
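Setting .cmd_size in the host template lets the SCSI midlayer co-allocate mac53c94_cmd_priv behind each scsi_cmnd, which scsi_cmd_priv() then returns. A userspace model of that allocate-behind pattern, with invented struct names:

#include <stdio.h>
#include <stdlib.h>

struct cmd {			/* stand-in for struct scsi_cmnd */
	int id;
};

struct cmd_priv {		/* like struct mac53c94_cmd_priv */
	int this_residual;
};

/* Model of scsi_cmd_priv(): the private area sits directly behind the
 * command, so one allocation of sizeof(cmd) + cmd_size serves both. */
static struct cmd_priv *cmd_priv(struct cmd *c)
{
	return (struct cmd_priv *)(c + 1);
}

int main(void)
{
	struct cmd *c = calloc(1, sizeof(*c) + sizeof(struct cmd_priv));

	if (!c)
		return 1;
	cmd_priv(c)->this_residual = 512;
	printf("residual %d\n", cmd_priv(c)->this_residual);
	free(c);
	return 0;
}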
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 5c808fbc6ce2..2e511697fce3 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -404,11 +404,12 @@ out:
static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
- cmd->SCp.this_residual < setup_use_pdma)
+ int resid = NCR5380_to_ncmd(cmd)->this_residual;
+
+ if (hostdata->flags & FLAG_NO_PSEUDO_DMA || resid < setup_use_pdma)
return 0;
- return cmd->SCp.this_residual;
+ return resid;
}
static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
@@ -434,7 +435,7 @@ static struct scsi_host_template mac_scsi_template = {
.sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
.max_sectors = 128,
};
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 0d31d7a5e335..bf491af9f0d6 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -44,10 +44,14 @@
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <scsi/scsicam.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
#include "megaraid.h"
@@ -192,23 +196,21 @@ mega_query_adapter(adapter_t *adapter)
{
dma_addr_t prod_info_dma_handle;
mega_inquiry3 *inquiry3;
- u8 raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ u8 *raw_mbox = (u8 *)&mbox;
int retval;
/* Initialize adapter inquiry mailbox */
- mbox = (mbox_t *)raw_mbox;
-
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* Try to issue Inquiry3 command
* if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and
* update enquiry3 structure
*/
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
@@ -232,10 +234,10 @@ mega_query_adapter(adapter_t *adapter)
inq = &ext_inq->raid_inq;
- mbox->m_out.xferaddr = (u32)dma_handle;
+ mbox.xferaddr = (u32)dma_handle;
/*issue old 0x04 command to adapter */
- mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;
+ mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ;
issue_scb_block(adapter, raw_mbox);
@@ -262,7 +264,7 @@ mega_query_adapter(adapter_t *adapter)
sizeof(mega_product_info),
DMA_FROM_DEVICE);
- mbox->m_out.xferaddr = prod_info_dma_handle;
+ mbox.xferaddr = prod_info_dma_handle;
raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
@@ -1642,16 +1644,10 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
static void
mega_rundoneq (adapter_t *adapter)
{
- struct scsi_cmnd *cmd;
- struct list_head *pos;
-
- list_for_each(pos, &adapter->completed_list) {
+ struct megaraid_cmd_priv *cmd_priv;
- struct scsi_pointer* spos = (struct scsi_pointer *)pos;
-
- cmd = list_entry(spos, struct scsi_cmnd, SCp);
- scsi_done(cmd);
- }
+ list_for_each_entry(cmd_priv, &adapter->completed_list, entry)
+ scsi_done(megaraid_to_scsi_cmd(cmd_priv));
INIT_LIST_HEAD(&adapter->completed_list);
}
@@ -3569,16 +3565,14 @@ mega_n_to_m(void __user *arg, megacmd_t *mc)
static int
mega_is_bios_enabled(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = IS_BIOS_ENABLED;
raw_mbox[2] = GET_BIOS;
@@ -3600,13 +3594,11 @@ mega_is_bios_enabled(adapter_t *adapter)
static void
mega_enum_raid_scsi(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int i;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out what channels are raid/scsi
@@ -3616,7 +3608,7 @@ mega_enum_raid_scsi(adapter_t *adapter)
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Non-ROMB firmware fail this command, so all channels
@@ -3655,23 +3647,21 @@ static void
mega_get_boot_drv(adapter_t *adapter)
{
struct private_bios_data *prv_bios_data;
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
u16 cksum = 0;
u8 *cksum_p;
u8 boot_pdrv;
int i;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
raw_mbox[0] = BIOS_PVT_DATA;
raw_mbox[2] = GET_BIOS_PVT_DATA;
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
adapter->boot_ldrv_enabled = 0;
adapter->boot_ldrv = 0;
@@ -3721,13 +3711,11 @@ mega_get_boot_drv(adapter_t *adapter)
static int
mega_support_random_del(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command
@@ -3750,13 +3738,11 @@ mega_support_random_del(adapter_t *adapter)
static int
mega_support_ext_cdb(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out if controller supports extended CDBs.
*/
@@ -3865,16 +3851,14 @@ mega_do_del_logdrv(adapter_t *adapter, int logdrv)
static void
mega_get_max_sgl(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(mbox, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = MAIN_MISC_OPCODE;
raw_mbox[2] = GET_MAX_SG_SUPPORT;
@@ -3888,7 +3872,7 @@ mega_get_max_sgl(adapter_t *adapter)
}
else {
adapter->sglen = *((char *)adapter->mega_buffer);
-
+
/*
* Make sure this is not more than the resources we are
* planning to allocate
@@ -3910,16 +3894,14 @@ mega_get_max_sgl(adapter_t *adapter)
static int
mega_support_cluster(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(mbox, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Try to get the initiator id. This command will succeed iff the
@@ -4135,6 +4117,7 @@ static struct scsi_host_template megaraid_template = {
.eh_bus_reset_handler = megaraid_reset,
.eh_host_reset_handler = megaraid_reset,
.no_write_same = 1,
+ .cmd_size = sizeof(struct megaraid_cmd_priv),
};
static int
@@ -4624,7 +4607,7 @@ static int __init megaraid_init(void)
* major number allocation.
*/
major = register_chrdev(0, "megadev_legacy", &megadev_fops);
- if (!major) {
+ if (major < 0) {
printk(KERN_WARNING
"megaraid: failed to register char device\n");
}
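
Every mega_* helper above repeats one conversion: the old code declared a raw byte array sized after struct mbox_out and type-punned it to mbox_t, which led to error-prone zeroing such as memset(&mbox->m_out, 0, sizeof(raw_mbox)). The new shape keeps a properly typed mailbox on the stack and derives the byte view from it, so the memset size can no longer drift from the object it clears. A reduced sketch (the mbox_out layout here is abbreviated for illustration, not the driver's full definition):

#include <linux/string.h>
#include <linux/types.h>

struct mbox_out {		/* abbreviated for illustration */
	u8	cmd;
	u8	cmdid;
	u32	xferaddr;
} __packed;

static void issue_example(dma_addr_t buf_dma)
{
	struct mbox_out mbox;
	u8 *raw_mbox = (u8 *)&mbox;	/* byte view of the same object */

	memset(&mbox, 0, sizeof(mbox));	/* size tied to the type */
	mbox.xferaddr = (u32)buf_dma;	/* typed access for wide fields */
	raw_mbox[0] = 0xa1;		/* opcode slots set via byte view */
	raw_mbox[2] = 0x0e;
}
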
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index cce23a086fbe..013fbfb911b9 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -4,6 +4,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <scsi/scsi_cmnd.h>
#define MEGARAID_VERSION \
"v2.00.4 (Release Date: Thu Feb 9 08:51:30 EST 2006)\n"
@@ -756,8 +757,28 @@ struct private_bios_data {
#define CACHED_IO 0
#define DIRECT_IO 1
+struct megaraid_cmd_priv {
+ struct list_head entry;
+};
+
+#define SCSI_LIST(scp) \
+ (&((struct megaraid_cmd_priv *)scsi_cmd_priv(scp))->entry)
+
+struct scsi_cmd_and_priv {
+ struct scsi_cmnd cmd;
+ struct megaraid_cmd_priv priv;
+};
+
+static inline struct scsi_cmnd *
+megaraid_to_scsi_cmd(struct megaraid_cmd_priv *cmd_priv)
+{
+ /* See also scsi_mq_setup_tags() */
+ BUILD_BUG_ON(sizeof(struct scsi_cmd_and_priv) !=
+ sizeof(struct scsi_cmnd) +
+ sizeof(struct megaraid_cmd_priv));
-#define SCSI_LIST(scp) ((struct list_head *)(&(scp)->SCp))
+ return &container_of(cmd_priv, struct scsi_cmd_and_priv, priv)->cmd;
+}
/*
* Each controller's soft state
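
megaraid_to_scsi_cmd() only works because scsi_mq_setup_tags() places the cmd_size private area directly behind each scsi_cmnd; the BUILD_BUG_ON makes that no-padding assumption fail at compile time if it ever breaks. The generic shape of the trick, with hypothetical "foo" names:

#include <linux/build_bug.h>
#include <linux/container_of.h>
#include <linux/list.h>
#include <scsi/scsi_cmnd.h>

struct foo_priv {
	struct list_head entry;
};

/* mirrors the midlayer's per-request layout: command, then private area */
struct foo_cmd_and_priv {
	struct scsi_cmnd cmd;
	struct foo_priv	 priv;
};

static inline struct scsi_cmnd *foo_to_scsi_cmd(struct foo_priv *priv)
{
	/* if padding ever appears between cmd and priv, this layout no
	 * longer matches scsi_mq_setup_tags() and the build fails */
	BUILD_BUG_ON(sizeof(struct foo_cmd_and_priv) !=
		     sizeof(struct scsi_cmnd) + sizeof(struct foo_priv));

	return &container_of(priv, struct foo_cmd_and_priv, priv)->cmd;
}
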
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 14f930d27ca1..132de68c14e9 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -181,7 +181,7 @@ MODULE_PARM_DESC(cmd_per_lun,
* This would result in non-disk devices being skipped during driver load
* time. These can be later added though, using /proc/scsi/scsi
*/
-static unsigned int megaraid_fast_load = 0;
+static unsigned int megaraid_fast_load;
module_param_named(fast_load, megaraid_fast_load, int, 0);
MODULE_PARM_DESC(fast_load,
"Faster loading of the driver, skips physical devices! (default=0)");
@@ -1431,7 +1431,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
/**
* megaraid_queue_command_lck - generic queue entry point for all LLDs
* @scp : pointer to the scsi command to be executed
- * @done : callback routine to be called after the cmd has be completed
*
* Queue entry point for mailbox based controllers.
*/
@@ -3980,7 +3979,7 @@ megaraid_mbox_app_hndl_show(struct device *dev, struct device_attribute *attr, c
app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
- return snprintf(buf, 8, "%u\n", app_hndl);
+ return sysfs_emit(buf, "%u\n", app_hndl);
}
@@ -4049,7 +4048,7 @@ megaraid_mbox_ld_show(struct device *dev, struct device_attribute *attr, char *b
}
}
- return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
+ return sysfs_emit(buf, "%d %d %d %d\n", scsi_id, logical_drv,
ldid_map, app_hndl);
}
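
The two show() fixes above drop snprintf() with hand-counted limits (8 and 36 bytes) in favor of sysfs_emit(), which enforces the sysfs contract that buf is a full, page-aligned PAGE_SIZE buffer. A sketch, with a placeholder attribute and value:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t app_hndl_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	u32 app_hndl = 42;	/* placeholder for the real lookup */

	/* sysfs_emit() knows the buffer size; no magic byte counts */
	return sysfs_emit(buf, "%u\n", app_hndl);
}
static DEVICE_ATTR_RO(app_hndl);
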
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 2c9d1b796475..4919ea54b827 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,6 +18,8 @@
#ifndef LSI_MEGARAID_SAS_H
#define LSI_MEGARAID_SAS_H
+#include <scsi/scsi_cmnd.h>
+
/*
* MegaRAID SAS Driver meta data
*/
@@ -2558,6 +2560,9 @@ struct megasas_instance_template {
#define MEGASAS_IS_LOGICAL(sdev) \
((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+#define MEGASAS_IS_LUN_VALID(sdev) \
+ (((sdev)->lun == 0) ? 1 : 0)
+
#define MEGASAS_DEV_INDEX(scp) \
(((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
scp->device->id)
@@ -2594,6 +2599,16 @@ struct megasas_cmd {
};
};
+struct megasas_cmd_priv {
+ void *cmd_priv;
+ u8 status;
+};
+
+static inline struct megasas_cmd_priv *megasas_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
#define MAX_MGMT_ADAPTERS 1024
#define MAX_IOCTL_SGE 16
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index aeb95f409826..d265a2d9d082 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1760,7 +1760,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
goto out_return_cmd;
cmd->scmd = scmd;
- scmd->SCp.ptr = (char *)cmd;
+ megasas_priv(scmd)->cmd_priv = cmd;
/*
* Issue the command to the FW
@@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
goto scan_target;
}
return -ENXIO;
+ } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return -ENXIO;
}
scan_target:
@@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
instance = megasas_lookup_instance(sdev->host->host_no);
if (MEGASAS_IS_LOGICAL(sdev)) {
+ if (!MEGASAS_IS_LUN_VALID(sdev)) {
+ sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+ return;
+ }
ld_tgt_id = MEGASAS_TARGET_ID(sdev);
instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
if (megasas_dbg_lvl & LD_PD_DEBUG)
@@ -2992,11 +2999,10 @@ megasas_dump_reg_set(void __iomem *reg_set)
void
megasas_dump_fusion_io(struct scsi_cmnd *scmd)
{
- struct megasas_cmd_fusion *cmd;
+ struct megasas_cmd_fusion *cmd = megasas_priv(scmd)->cmd_priv;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct megasas_instance *instance;
- cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
instance = (struct megasas_instance *)scmd->device->host->hostdata;
scmd_printk(KERN_INFO, scmd,
@@ -3168,7 +3174,7 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
-static int megasas_map_queues(struct Scsi_Host *shost)
+static void megasas_map_queues(struct Scsi_Host *shost)
{
struct megasas_instance *instance;
int qoff = 0, offset;
@@ -3177,7 +3183,7 @@ static int megasas_map_queues(struct Scsi_Host *shost)
instance = (struct megasas_instance *)shost->hostdata;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
offset = instance->low_latency_index_start;
@@ -3189,6 +3195,9 @@ static int megasas_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
offset += map->nr_queues;
+ /* we never use READ queue, so can't cheat blk-mq */
+ shost->tag_set.map[HCTX_TYPE_READ].nr_queues = 0;
+
/* Setup Poll hctx */
map = &shost->tag_set.map[HCTX_TYPE_POLL];
map->nr_queues = instance->iopoll_q_count;
@@ -3200,8 +3209,6 @@ static int megasas_map_queues(struct Scsi_Host *shost)
map->queue_offset = qoff;
blk_mq_map_queues(map);
}
-
- return 0;
}
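
megasas_map_queues() loses its int return because the .map_queues host template hook was converted tree-wide to return void: blk-mq never consumed the value, so there is no error path and the single-queue case just returns early. A reduced sketch of the new shape (hypothetical "foo" host):

#include <linux/blk-mq.h>
#include <scsi/scsi_host.h>

static void foo_map_queues(struct Scsi_Host *shost)
{
	struct blk_mq_queue_map *map;

	if (shost->nr_hw_queues == 1)
		return;		/* default mapping is fine; nothing to report */

	map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
	map->nr_queues = shost->nr_hw_queues;
	map->queue_offset = 0;
	blk_mq_map_queues(map);	/* spread CPUs across the hw queues */
}
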
static void megasas_aen_polling(struct work_struct *work);
@@ -3518,6 +3525,7 @@ static struct scsi_host_template megasas_template = {
.mq_poll = megasas_blk_mq_poll,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
+ .cmd_size = sizeof(struct megasas_cmd_priv),
};
/**
@@ -3601,7 +3609,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
cmd->retry_for_fw_reset = 0;
if (cmd->scmd)
- cmd->scmd->SCp.ptr = NULL;
+ megasas_priv(cmd->scmd)->cmd_priv = NULL;
switch (hdr->cmd) {
case MFI_CMD_INVALID:
@@ -3940,9 +3948,9 @@ process_fw_state_change_wq(struct work_struct *work)
u32 wait;
unsigned long flags;
- if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
- atomic_read(&instance->adprecovery));
+ atomic_read(&instance->adprecovery));
return ;
}
@@ -4013,10 +4021,8 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
u32 mfiStatus;
u32 fw_state;
- if ((mfiStatus = instance->instancet->check_reset(instance,
- instance->reg_set)) == 1) {
+ if (instance->instancet->check_reset(instance, instance->reg_set) == 1)
return IRQ_HANDLED;
- }
mfiStatus = instance->instancet->clear_intr(instance);
if (mfiStatus == 0) {
@@ -4466,8 +4472,6 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
return -ENOMEM;
}
- memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
-
for (i = 0; i < max_cmd; i++) {
instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
GFP_KERNEL);
@@ -5149,9 +5153,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
fusion->current_map_sz = ventura_map_sz;
fusion->max_map_sz = ventura_map_sz;
} else {
- fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *
- (instance->fw_supported_vd_count - 1));
+ fusion->old_map_sz =
+ struct_size((struct MR_FW_RAID_MAP *)0, ldSpanMap,
+ instance->fw_supported_vd_count);
fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
fusion->max_map_sz =
@@ -5720,7 +5724,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
"Failed to register IRQ for vector %d.\n", i);
for (j = 0; j < i; j++) {
if (j < instance->low_latency_index_start)
- irq_set_affinity_hint(
+ irq_update_affinity_hint(
pci_irq_vector(pdev, j), NULL);
free_irq(pci_irq_vector(pdev, j),
&instance->irq_context[j]);
@@ -5763,7 +5767,7 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
if (i < instance->low_latency_index_start)
- irq_set_affinity_hint(
+ irq_update_affinity_hint(
pci_irq_vector(instance->pdev, i), NULL);
free_irq(pci_irq_vector(instance->pdev, i),
&instance->irq_context[i]);
@@ -5784,10 +5788,10 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
{
int i;
struct fusion_context *fusion = instance->ctrl_context;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz = struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0, seq,
+ MAX_PHYSICAL_DEVICES);
instance->use_seqnum_jbod_fp =
instance->support_seqnum_jbod_fp;
@@ -5870,10 +5874,6 @@ fallback:
static
int megasas_get_device_list(struct megasas_instance *instance)
{
- memset(instance->pd_list, 0,
- (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
- memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
-
if (instance->enable_fw_dev_list) {
if (megasas_host_device_list_query(instance, true))
return FAILED;
@@ -5894,22 +5894,25 @@ int megasas_get_device_list(struct megasas_instance *instance)
}
/**
- * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
- * @instance: Adapter soft state
- * return: void
+ * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint
+ * for high IOPS queues
+ * @instance: Adapter soft state
+ * return: void
*/
static inline void
-megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
{
int i;
- int local_numa_node;
+ unsigned int irq;
+ const struct cpumask *mask;
if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
- local_numa_node = dev_to_node(&instance->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
- for (i = 0; i < instance->low_latency_index_start; i++)
- irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
- cpumask_of_node(local_numa_node));
+ for (i = 0; i < instance->low_latency_index_start; i++) {
+ irq = pci_irq_vector(instance->pdev, i);
+ irq_set_affinity_and_hint(irq, mask);
+ }
}
}
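
Two different replacements for irq_set_affinity_hint() appear in these hunks: teardown paths that only clear the published hint switch to irq_update_affinity_hint(), which touches the hint but not the effective affinity, while the high-IOPS setup, which genuinely wants the vectors on the device's NUMA node, switches to irq_set_affinity_and_hint(), which applies the mask and publishes it. A sketch of both sides:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/topology.h>

static void steer_high_iops_vector(struct pci_dev *pdev, int msix_index)
{
	const struct cpumask *mask =
		cpumask_of_node(dev_to_node(&pdev->dev));
	unsigned int irq = pci_irq_vector(pdev, msix_index);

	/* move the IRQ and publish /proc/irq/<n>/affinity_hint */
	irq_set_affinity_and_hint(irq, mask);
}

static void release_vector_hint(struct pci_dev *pdev, int msix_index)
{
	/* clear the published hint without forcing a new affinity */
	irq_update_affinity_hint(pci_irq_vector(pdev, msix_index), NULL);
}
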
@@ -5998,7 +6001,7 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
instance->msix_vectors = 0;
if (instance->smp_affinity_enable)
- megasas_set_high_iops_queue_affinity_hint(instance);
+ megasas_set_high_iops_queue_affinity_and_hint(instance);
}
/**
@@ -7142,22 +7145,18 @@ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
switch (instance->adapter_type) {
case MFI_SERIES:
if (megasas_alloc_mfi_ctrl_mem(instance))
- goto fail;
+ return -ENOMEM;
break;
case AERO_SERIES:
case VENTURA_SERIES:
case THUNDERBOLT_SERIES:
case INVADER_SERIES:
if (megasas_alloc_fusion_context(instance))
- goto fail;
+ return -ENOMEM;
break;
}
return 0;
- fail:
- kfree(instance->reply_map);
- instance->reply_map = NULL;
- return -ENOMEM;
}
/*
@@ -7217,7 +7216,7 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
if (!fusion->ioc_init_request) {
dev_err(&pdev->dev,
- "Failed to allocate PD list buffer\n");
+ "Failed to allocate ioc init request\n");
return -ENOMEM;
}
@@ -7436,7 +7435,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
instance->flag_ieee = 1;
- megasas_dbg_lvl = 0;
instance->flag = 0;
instance->unload = 1;
instance->last_time = 0;
@@ -7963,7 +7961,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
struct Scsi_Host *host;
struct megasas_instance *instance;
struct fusion_context *fusion;
- u32 pd_seq_map_sz;
+ size_t pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
@@ -8035,9 +8033,9 @@ skip_firing_dcmds:
if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
- (sizeof(struct MR_PD_CFG_SEQ) *
- (MAX_PHYSICAL_DEVICES - 1));
+ pd_seq_map_sz =
+ struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0,
+ seq, MAX_PHYSICAL_DEVICES);
for (i = 0; i < 2 ; i++) {
if (fusion->ld_map[i])
dma_free_coherent(&instance->pdev->dev,
@@ -8759,33 +8757,26 @@ static
int megasas_update_device_list(struct megasas_instance *instance,
int event_type)
{
- int dcmd_ret = DCMD_SUCCESS;
+ int dcmd_ret;
if (instance->enable_fw_dev_list) {
- dcmd_ret = megasas_host_device_list_query(instance, false);
- if (dcmd_ret != DCMD_SUCCESS)
- goto out;
+ return megasas_host_device_list_query(instance, false);
} else {
if (event_type & SCAN_PD_CHANNEL) {
dcmd_ret = megasas_get_pd_list(instance);
-
if (dcmd_ret != DCMD_SUCCESS)
- goto out;
+ return dcmd_ret;
}
if (event_type & SCAN_VD_CHANNEL) {
if (!instance->requestorId ||
megasas_get_ld_vf_affiliation(instance, 0)) {
- dcmd_ret = megasas_ld_list_query(instance,
+ return megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
- if (dcmd_ret != DCMD_SUCCESS)
- goto out;
}
}
}
-
-out:
- return dcmd_ret;
+ return DCMD_SUCCESS;
}
/**
@@ -8915,7 +8906,7 @@ megasas_aen_polling(struct work_struct *work)
sdev1 = scsi_device_lookup(instance->host,
MEGASAS_MAX_PD_CHANNELS +
(ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
- (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL),
+ (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL),
0);
if (sdev1)
megasas_remove_scsi_device(sdev1);
@@ -9013,6 +9004,7 @@ static int __init megasas_init(void)
*/
pr_info("megasas: %s\n", MEGASAS_VERSION);
+ megasas_dbg_lvl = 0;
support_poll_for_event = 2;
support_device_change = 1;
support_nvme_encapsulation = true;
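
The recurring sizeof(header) + sizeof(element) * (N - 1) arithmetic in this file existed only because the trailing arrays were declared with a single element; once those become flexible arrays (see the megaraid_sas_fusion.h hunks below), struct_size() expresses the size for all N elements directly and saturates rather than overflowing on the multiplication. Reduced sketch with stand-in types:

#include <linux/overflow.h>
#include <linux/types.h>

#define MAX_DEVICES 256			/* stand-in bound */

struct seq_entry { __le32 seq_num; };

struct seq_num_sync {
	__le32 size;
	__le32 count;
	struct seq_entry seq[];		/* flexible array member */
};

static size_t seq_map_size(void)
{
	/* was: sizeof(struct seq_num_sync) +
	 *      sizeof(struct seq_entry) * (MAX_DEVICES - 1)
	 * back when seq was declared seq[1] */
	return struct_size((struct seq_num_sync *)0, seq, MAX_DEVICES);
}
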
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 83f69c33b01a..da1cad1ee123 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -326,9 +326,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
else if (instance->supportmax256vd)
expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
else
- expected_size =
- (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
+ expected_size = struct_size((struct MR_FW_RAID_MAP *)0,
+ ldSpanMap,
+ le16_to_cpu(pDrvRaidMap->ldCount));
if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index fc90a0a687b5..6650f8c8e9b0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1310,7 +1310,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
- pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
+ pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES);
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -2047,8 +2047,6 @@ map_cmd_status(struct fusion_context *fusion,
scmd->result = (DID_OK << 16) | ext_status;
if (ext_status == SAM_STAT_CHECK_CONDITION) {
- memset(scmd->sense_buffer, 0,
- SCSI_SENSE_BUFFERSIZE);
memcpy(scmd->sense_buffer, sense,
SCSI_SENSE_BUFFERSIZE);
}
@@ -2915,7 +2913,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
get_updated_dev_handle(instance,
&fusion->load_balance_info[device_id],
&io_info, local_map_ptr);
- scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(scp)->status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
if (instance->adapter_type >= VENTURA_SERIES)
rctx_g35->span_arm = io_info.span_arm;
@@ -2923,7 +2921,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
rctx->span_arm = io_info.span_arm;
} else
- scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(scp)->status &= ~MEGASAS_LOAD_BALANCE_FLAG;
if (instance->adapter_type >= VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
@@ -3201,7 +3199,6 @@ megasas_build_io_fusion(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd)
{
int sge_count;
- u8 cmd_type;
u16 pd_index = 0;
u8 drive_type = 0;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
@@ -3227,7 +3224,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
*/
io_request->IoFlags = cpu_to_le16(scp->cmd_len);
- switch (cmd_type = megasas_cmd_type(scp)) {
+ switch (megasas_cmd_type(scp)) {
case READ_WRITE_LDIO:
megasas_build_ldio_fusion(instance, scp, cmd);
break;
@@ -3293,7 +3290,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
- scp->SCp.ptr = (char *)cmd;
+ megasas_priv(scp)->cmd_priv = cmd;
return 0;
}
@@ -3489,7 +3486,7 @@ megasas_complete_r1_command(struct megasas_instance *instance,
if (instance->ldio_threshold &&
megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
atomic_dec(&instance->ldio_outstanding);
- scmd_local->SCp.ptr = NULL;
+ megasas_priv(scmd_local)->cmd_priv = NULL;
megasas_return_cmd_fusion(instance, cmd);
scsi_dma_unmap(scmd_local);
megasas_sdev_busy_dec(instance, scmd_local);
@@ -3613,12 +3610,13 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
/* Update load balancing info */
if (fusion->load_balance_info &&
- (cmd_fusion->scmd->SCp.Status &
+ (megasas_priv(cmd_fusion->scmd)->status &
MEGASAS_LOAD_BALANCE_FLAG)) {
device_id = MEGASAS_DEV_INDEX(scmd_local);
lbinfo = &fusion->load_balance_info[device_id];
atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
- cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
+ megasas_priv(cmd_fusion->scmd)->status &=
+ ~MEGASAS_LOAD_BALANCE_FLAG;
}
fallthrough; /* and complete IO */
case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
@@ -3630,7 +3628,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
if (instance->ldio_threshold &&
(megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
atomic_dec(&instance->ldio_outstanding);
- scmd_local->SCp.ptr = NULL;
+ megasas_priv(scmd_local)->cmd_priv = NULL;
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
megasas_sdev_busy_dec(instance, scmd_local);
@@ -5099,8 +5097,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
if (instance->adapter_type >= VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
- 0, sizeof(struct LD_STREAM_DETECT));
- fusion->stream_detect_by_ld[j]->mru_bit_map
+ 0, sizeof(struct LD_STREAM_DETECT));
+ fusion->stream_detect_by_ld[j]->mru_bit_map
= MR_STREAM_BITMAP;
}
}
@@ -5312,7 +5310,6 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
if (!fusion->log_to_span) {
dev_err(&instance->pdev->dev, "Failed from %s %d\n",
__func__, __LINE__);
- kfree(instance->ctrl_context);
return -ENOMEM;
}
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index ce84f811e5e1..49e9a9048ee7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -942,7 +942,7 @@ struct MR_FW_RAID_MAP {
u8 reserved2[7];
struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
- struct MR_LD_SPAN_MAP ldSpanMap[1];
+ struct MR_LD_SPAN_MAP ldSpanMap[];
};
struct IO_REQUEST_INFO {
@@ -1053,7 +1053,7 @@ struct MR_FW_RAID_MAP_DYNAMIC {
struct MR_RAID_MAP_DESC_TABLE
raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
/* Variable Size buffer containing all data */
- u32 raid_map_desc_data[1];
+ u32 raid_map_desc_data[];
}; /* Dynamically sized RAID map structure */
#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
@@ -1148,7 +1148,7 @@ typedef struct LOG_BLOCK_SPAN_INFO {
struct MR_FW_RAID_MAP_ALL {
struct MR_FW_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
} __attribute__ ((packed));
struct MR_DRV_RAID_MAP {
@@ -1182,7 +1182,7 @@ struct MR_DRV_RAID_MAP {
devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
- struct MR_LD_SPAN_MAP ldSpanMap[1];
+ struct MR_LD_SPAN_MAP ldSpanMap[];
};
@@ -1193,7 +1193,7 @@ struct MR_DRV_RAID_MAP {
struct MR_DRV_RAID_MAP_ALL {
struct MR_DRV_RAID_MAP raidMap;
- struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
} __packed;
@@ -1249,7 +1249,7 @@ struct MR_PD_CFG_SEQ {
struct MR_PD_CFG_SEQ_NUM_SYNC {
__le32 size;
__le32 count;
- struct MR_PD_CFG_SEQ seq[1];
+ struct MR_PD_CFG_SEQ seq[];
} __packed;
/* stream detection */
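
These header hunks complete the pattern: every [1]-sized trailing array becomes a true flexible array member, and the *_ALL wrapper structs that used to append N - 1 extra elements now declare the full N, keeping total sizes unchanged while letting the compiler and fortified string routines see the arrays' real extent. The shape of the conversion, with stand-in names:

#define N_SPANS 64			/* stand-in for MAX_LOGICAL_DRIVES */

struct span_map { int span[8]; };	/* stand-in element */

struct raid_map {
	unsigned int	count;
	struct span_map	ld_span_map[];	/* was: ld_span_map[1] */
};

/* the wrapper grows from N_SPANS - 1 to N_SPANS because the base
 * struct no longer contributes an element of its own; total size is
 * identical (mirrors the MR_FW_RAID_MAP_ALL layout above) */
struct raid_map_all {
	struct raid_map	raid_map;
	struct span_map	ld_span_map[N_SPANS];
};
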
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index ca133e0a140a..84b541a57b7b 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -38,7 +38,7 @@
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/processor.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
@@ -586,10 +586,12 @@ static void mesh_done(struct mesh_state *ms, int start_next)
ms->current_req = NULL;
tp->current_req = NULL;
if (cmd) {
+ struct mesh_cmd_priv *mcmd = mesh_priv(cmd);
+
set_host_byte(cmd, ms->stat);
- set_status_byte(cmd, cmd->SCp.Status);
+ set_status_byte(cmd, mcmd->status);
if (ms->stat == DID_OK)
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+ scsi_msg_to_host_byte(cmd, mcmd->message);
if (DEBUG_TARGET(cmd)) {
printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
cmd->result, ms->data_ptr, scsi_bufflen(cmd));
@@ -603,7 +605,7 @@ static void mesh_done(struct mesh_state *ms, int start_next)
}
#endif
}
- cmd->SCp.this_residual -= ms->data_ptr;
+ mcmd->this_residual -= ms->data_ptr;
scsi_done(cmd);
}
if (start_next) {
@@ -1171,7 +1173,7 @@ static void handle_msgin(struct mesh_state *ms)
if (ms->n_msgin < msgin_length(ms))
goto reject;
if (cmd)
- cmd->SCp.Message = code;
+ mesh_priv(cmd)->message = code;
switch (code) {
case COMMAND_COMPLETE:
break;
@@ -1262,7 +1264,7 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
if (cmd) {
int nseg;
- cmd->SCp.this_residual = scsi_bufflen(cmd);
+ mesh_priv(cmd)->this_residual = scsi_bufflen(cmd);
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
@@ -1592,10 +1594,12 @@ static void cmd_complete(struct mesh_state *ms)
break;
case statusing:
if (cmd) {
- cmd->SCp.Status = mr->fifo;
+ struct mesh_cmd_priv *mcmd = mesh_priv(cmd);
+
+ mcmd->status = mr->fifo;
if (DEBUG_TARGET(cmd))
printk(KERN_DEBUG "mesh: status is %x\n",
- cmd->SCp.Status);
+ mcmd->status);
}
ms->msgphase = msg_in;
break;
@@ -1837,6 +1841,7 @@ static struct scsi_host_template mesh_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 2,
.max_segment_size = 65535,
+ .cmd_size = sizeof(struct mesh_cmd_priv),
};
static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
@@ -1877,11 +1882,6 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
goto out_release;
}
- /* Old junk for root discovery, that will die ultimately */
-#if !defined(MODULE)
- note_scsi_host(mesh, mesh_host);
-#endif
-
mesh_host->base = macio_resource_start(mdev, 0);
mesh_host->irq = macio_irq(mdev, 0);
ms = (struct mesh_state *) mesh_host->hostdata;
diff --git a/drivers/scsi/mesh.h b/drivers/scsi/mesh.h
index ee53c05ace95..f70181acceac 100644
--- a/drivers/scsi/mesh.h
+++ b/drivers/scsi/mesh.h
@@ -8,6 +8,17 @@
#ifndef _MESH_H
#define _MESH_H
+struct mesh_cmd_priv {
+ int this_residual;
+ int message;
+ int status;
+};
+
+static inline struct mesh_cmd_priv *mesh_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
/*
* Registers in the MESH controller.
*/
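
mesh_done() now assembles cmd->result through the accessor helpers instead of open-coded shifts (the mac53c94 hunk at the top of this section builds the same DID_OK/message/status triple by hand). A minimal completion-path sketch, names hypothetical:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static void foo_complete(struct scsi_cmnd *cmd, int host_stat,
			 u8 scsi_status, u8 msg_byte)
{
	set_host_byte(cmd, host_stat);		/* DID_* transport result */
	set_status_byte(cmd, scsi_status);	/* SAM_STAT_* device status */
	if (host_stat == DID_OK)
		/* fold the SCSI message byte into the host byte */
		scsi_msg_to_host_byte(cmd, msg_byte);
	scsi_done(cmd);
}
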
diff --git a/drivers/scsi/mpi3mr/Kconfig b/drivers/scsi/mpi3mr/Kconfig
index f7882375e74f..f48740cd5b95 100644
--- a/drivers/scsi/mpi3mr/Kconfig
+++ b/drivers/scsi/mpi3mr/Kconfig
@@ -3,5 +3,7 @@
config SCSI_MPI3MR
tristate "Broadcom MPI3 Storage Controller Device Driver"
depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ select SCSI_SAS_ATTRS
help
MPI3 based Storage & RAID Controllers Driver.
diff --git a/drivers/scsi/mpi3mr/Makefile b/drivers/scsi/mpi3mr/Makefile
index 7c2063e04c81..ef86ca46646b 100644
--- a/drivers/scsi/mpi3mr/Makefile
+++ b/drivers/scsi/mpi3mr/Makefile
@@ -2,3 +2,5 @@
obj-m += mpi3mr.o
mpi3mr-y += mpi3mr_os.o \
mpi3mr_fw.o \
+ mpi3mr_app.o \
+ mpi3mr_transport.o
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index d43bbecef651..0a2af48915a5 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -1,14 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2017-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2017-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_CNFG_H
#define MPI30_CNFG_H 1
#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00)
#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01)
#define MPI3_CONFIG_PAGETYPE_IOC (0x02)
-#define MPI3_CONFIG_PAGETYPE_UEFI_BSD (0x03)
+#define MPI3_CONFIG_PAGETYPE_DRIVER (0x03)
#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04)
#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11)
#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12)
@@ -100,6 +99,7 @@ struct mpi3_config_page_header {
#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xf0)
#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4)
#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0f)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
#define MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
#define MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
#define MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
@@ -135,6 +135,16 @@ struct mpi3_config_page_header {
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
+#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_SHIFT (0)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_MASK (0x04000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_CHANGED_SHIFT (26)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_MASK (0x02000000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT_SHIFT (25)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_MASK (0x01000000)
+#define MPI3_SAS_PHYINFO_REQUESTED_INSIDE_ZPSDS_SHIFT (24)
+#define MPI3_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI3_SAS_PHYINFO_INSIDE_ZPSDS_WITHIN (0x00200000)
+#define MPI3_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
#define MPI3_SAS_PHYINFO_REASON_MASK (0x000f0000)
#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
@@ -181,8 +191,16 @@ struct mpi3_config_page_header {
#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a)
#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b)
#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c)
-#define MPI3_SLOT_INVALID (0xffff)
-#define MPI3_SLOT_INDEX_INVALID (0xffff)
+#define MPI3_SLOT_INVALID (0xffff)
+#define MPI3_SLOT_INDEX_INVALID (0xffff)
+#define MPI3_LINK_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_RATE_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_TEMP_SENSOR_LOCATION_INTERNAL (0x0)
+#define MPI3_TEMP_SENSOR_LOCATION_INLET (0x1)
+#define MPI3_TEMP_SENSOR_LOCATION_OUTLET (0x2)
+#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3)
+#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000)
+#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5)
struct mpi3_man_page0 {
struct mpi3_config_page_header header;
u8 chip_revision[8];
@@ -194,20 +212,22 @@ struct mpi3_man_page0 {
__le32 reserved94;
__le32 reserved98;
u8 oem;
- u8 sub_oem;
- __le16 reserved9e;
+ u8 profile_identifier;
+ __le16 flags;
u8 board_mfg_day;
u8 board_mfg_month;
__le16 board_mfg_year;
u8 board_rework_day;
u8 board_rework_month;
__le16 board_rework_year;
- __le64 board_revision;
+ u8 board_revision[8];
u8 e_pack_fru[16];
u8 product_name[256];
};
#define MPI3_MAN0_PAGEVERSION (0x00)
+#define MPI3_MAN0_FLAGS_SWITCH_PRESENT (0x0002)
+#define MPI3_MAN0_FLAGS_EXPANDER_PRESENT (0x0001)
#define MPI3_MAN1_VPD_SIZE (512)
struct mpi3_man_page1 {
struct mpi3_config_page_header header;
@@ -216,6 +236,15 @@ struct mpi3_man_page1 {
};
#define MPI3_MAN1_PAGEVERSION (0x00)
+struct mpi3_man_page2 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 reserved0c[3];
+ u8 oem_board_tracer_number[32];
+};
+#define MPI3_MAN2_PAGEVERSION (0x00)
+#define MPI3_MAN2_FLAGS_TRACER_PRESENT (0x01)
struct mpi3_man5_phy_entry {
__le64 ioc_wwid;
__le64 device_name;
@@ -236,7 +265,7 @@ struct mpi3_man_page5 {
#define MPI3_MAN5_PAGEVERSION (0x00)
struct mpi3_man6_gpio_entry {
u8 function_code;
- u8 reserved01;
+ u8 function_flags;
__le16 flags;
u8 param1;
u8 param2;
@@ -253,16 +282,24 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06)
#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07)
#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08)
-#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_MUX_RESET (0x09)
#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
-#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ATTN (0x0d)
+#define MPI3_MAN6_GPIO_FUNCTION_PBLP_STATUS_CHANGE (0x0d)
#define MPI3_MAN6_GPIO_FUNCTION_EPACK_ONLINE (0x0e)
#define MPI3_MAN6_GPIO_FUNCTION_EPACK_FAULT (0x0f)
#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13)
+#define MPI3_MAN6_GPIO_FUNCTION_AUXILIARY_POWER (0x14)
+#define MPI3_MAN6_GPIO_FUNCTION_RAID_DATA_CACHE_DIRTY (0x15)
+#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_CONTROL (0x16)
+#define MPI3_MAN6_GPIO_FUNCTION_BOARD_FAN_FAULT (0x17)
+#define MPI3_MAN6_GPIO_FUNCTION_POWER_BRAKE (0x18)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
@@ -275,8 +312,6 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02)
-#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_SPEC_MUX (0x00)
-#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_ALL_MUXES (0x01)
#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
@@ -322,6 +357,8 @@ struct mpi3_man7_receptacle_info {
#define MPI3_MAN7_LOCATION_INTERNAL (0x01)
#define MPI3_MAN7_LOCATION_EXTERNAL (0x02)
#define MPI3_MAN7_LOCATION_VIRTUAL (0x03)
+#define MPI3_MAN7_LOCATION_HOST (0x04)
+#define MPI3_MAN7_CONNECTOR_TYPE_NO_INFO (0x00)
#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10)
#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00)
#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10)
@@ -353,6 +390,8 @@ struct mpi3_man8_phy_info {
__le32 reserved0c;
};
+#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_NOT_ASSOCIATED (0xff)
+#define MPI3_MAN8_PHY_INFO_CONNECTOR_LANE_NOT_ASSOCIATED (0xff)
#ifndef MPI3_MAN8_PHY_INFO_MAX
#define MPI3_MAN8_PHY_INFO_MAX (1)
#endif
@@ -373,20 +412,22 @@ struct mpi3_man9_rsrc_entry {
};
enum mpi3_man9_resources {
- MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
- MPI3_MAN9_RSRC_TARGET_CMDS = 1,
- MPI3_MAN9_RSRC_SAS_TARGETS = 2,
- MPI3_MAN9_RSRC_PCIE_TARGETS = 3,
- MPI3_MAN9_RSRC_INITIATORS = 4,
- MPI3_MAN9_RSRC_VDS = 5,
- MPI3_MAN9_RSRC_ENCLOSURES = 6,
- MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
- MPI3_MAN9_RSRC_EXPANDERS = 8,
- MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
- MPI3_MAN9_RSRC_PDS = 10,
- MPI3_MAN9_RSRC_HOST_PDS = 11,
- MPI3_MAN9_RSRC_ADV_HOST_PDS = 12,
- MPI3_MAN9_RSRC_RAID_PDS = 13,
+ MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
+ MPI3_MAN9_RSRC_TARGET_CMDS = 1,
+ MPI3_MAN9_RSRC_RESERVED02 = 2,
+ MPI3_MAN9_RSRC_NVME = 3,
+ MPI3_MAN9_RSRC_INITIATORS = 4,
+ MPI3_MAN9_RSRC_VDS = 5,
+ MPI3_MAN9_RSRC_ENCLOSURES = 6,
+ MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
+ MPI3_MAN9_RSRC_EXPANDERS = 8,
+ MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
+ MPI3_MAN9_RSRC_RESERVED10 = 10,
+ MPI3_MAN9_RSRC_HOST_PD_DRIVES = 11,
+ MPI3_MAN9_RSRC_ADV_HOST_PD_DRIVES = 12,
+ MPI3_MAN9_RSRC_RAID_PD_DRIVES = 13,
+ MPI3_MAN9_RSRC_DRV_DIAG_BUF = 14,
+ MPI3_MAN9_RSRC_NAMESPACE_COUNT = 15,
MPI3_MAN9_RSRC_NUM_RESOURCES
};
@@ -394,17 +435,22 @@ enum mpi3_man9_resources {
#define MPI3_MAN9_MAX_OUTSTANDING_REQS (65000)
#define MPI3_MAN9_MIN_TARGET_CMDS (0)
#define MPI3_MAN9_MAX_TARGET_CMDS (65535)
-#define MPI3_MAN9_MIN_SAS_TARGETS (0)
-#define MPI3_MAN9_MAX_SAS_TARGETS (65535)
-#define MPI3_MAN9_MIN_PCIE_TARGETS (0)
+#define MPI3_MAN9_MIN_NVME_TARGETS (0)
#define MPI3_MAN9_MIN_INITIATORS (0)
-#define MPI3_MAN9_MAX_INITIATORS (65535)
-#define MPI3_MAN9_MIN_ENCLOSURES (0)
+#define MPI3_MAN9_MIN_VDS (0)
+#define MPI3_MAN9_MIN_ENCLOSURES (1)
#define MPI3_MAN9_MAX_ENCLOSURES (65535)
#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
#define MPI3_MAN9_MIN_EXPANDERS (0)
#define MPI3_MAN9_MAX_EXPANDERS (65535)
#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
+#define MPI3_MAN9_MIN_HOST_PD_DRIVES (0)
+#define MPI3_MAN9_ADV_HOST_PD_DRIVES (0)
+#define MPI3_MAN9_RAID_PD_DRIVES (0)
+#define MPI3_MAN9_DRIVER_DIAG_BUFFER (0)
+#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1)
+#define MPI3_MAN9_MIN_EXPANDERS (0)
+#define MPI3_MAN9_MAX_EXPANDERS (65535)
struct mpi3_man_page9 {
struct mpi3_config_page_header header;
u8 num_resources;
@@ -422,9 +468,14 @@ struct mpi3_man_page9 {
struct mpi3_man10_istwi_ctrlr_entry {
__le16 slave_address;
__le16 flags;
- __le32 reserved04;
+ u8 scl_low_override;
+ u8 scl_high_override;
+ __le16 reserved06;
};
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
@@ -451,10 +502,13 @@ struct mpi3_man11_temp_sensor_device_format {
u8 temp_channel[4];
};
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
-#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_SE97B (0x03)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_MASK (0xe0)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_SHIFT (5)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
struct mpi3_man11_seeprom_device_format {
u8 size;
u8 page_write_size;
@@ -495,31 +549,44 @@ struct mpi3_man11_bkplane_spec_ubm_format {
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4)
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
-struct mpi3_man11_bkplane_spec_vpp_format {
+struct mpi3_man11_bkplane_spec_non_ubm_format {
__le16 flags;
- __le16 reserved02;
+ u8 reserved02;
+ u8 type;
};
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0040)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_MASK (0x0030)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_REG (0x0010)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_MASK (0x000f)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_MASK (0x00c0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_4 (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_2 (0x0040)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_LINKWIDTH_1 (0x0080)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_TYPE_VPP (0x00)
union mpi3_man11_bkplane_spec_format {
- struct mpi3_man11_bkplane_spec_ubm_format ubm;
- struct mpi3_man11_bkplane_spec_vpp_format vpp;
+ struct mpi3_man11_bkplane_spec_ubm_format ubm;
+ struct mpi3_man11_bkplane_spec_non_ubm_format non_ubm;
};
struct mpi3_man11_bkplane_mgmt_device_format {
u8 type;
u8 receptacle_id;
- __le16 reserved02;
+ u8 reset_info;
+ u8 reserved03;
union mpi3_man11_bkplane_spec_format backplane_mgmt_specific;
};
#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00)
-#define MPI3_MAN11_BKPLANE_MGMT_TYPE_VPP (0x01)
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_NON_UBM (0x01)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_MASK (0xf0)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_SHIFT (4)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_MASK (0x0f)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_SHIFT (0)
struct mpi3_man11_gas_gauge_device_format {
u8 type;
u8 reserved01[3];
@@ -527,6 +594,19 @@ struct mpi3_man11_gas_gauge_device_format {
};
#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00)
+struct mpi3_man11_mgmt_ctrlr_device_format {
+ __le32 reserved00;
+ __le32 reserved04;
+};
+struct mpi3_man11_board_fan_device_format {
+ u8 flags;
+ u8 reserved01;
+ u8 min_fan_speed;
+ u8 max_fan_speed;
+ __le32 reserved04;
+};
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_MASK (0x07)
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_AMC6821 (0x00)
union mpi3_man11_device_specific_format {
struct mpi3_man11_mux_device_format mux;
struct mpi3_man11_temp_sensor_device_format temp_sensor;
@@ -535,9 +615,10 @@ union mpi3_man11_device_specific_format {
struct mpi3_man11_cable_mgmt_device_format cable_mgmt;
struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
struct mpi3_man11_gas_gauge_device_format gas_gauge;
+ struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller;
+ struct mpi3_man11_board_fan_device_format board_fan;
__le32 words[2];
};
-
struct mpi3_man11_istwi_device_format {
u8 device_type;
u8 controller;
@@ -556,10 +637,9 @@ struct mpi3_man11_istwi_device_format {
#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04)
#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
+#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07)
+#define MPI3_MAN11_ISTWI_DEVTYPE_BOARD_FAN (0x08)
#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_MASK (0x06)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_100KHZ (0x00)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_400KHZ (0x02)
#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
#endif
@@ -680,20 +760,16 @@ struct mpi3_man_page13 {
#define MPI3_MAN13_PAGEVERSION (0x00)
struct mpi3_man_page14 {
struct mpi3_config_page_header header;
- __le16 flags;
- __le16 reserved0a;
+ __le32 reserved08;
u8 num_slot_groups;
u8 num_slots;
__le16 max_cert_chain_length;
__le32 sealed_slots;
+ __le32 populated_slots;
+ __le32 mgmt_pt_updatable_slots;
};
-
#define MPI3_MAN14_PAGEVERSION (0x00)
-#define MPI3_MAN14_FLAGS_AUTH_SESSION_REQ (0x01)
-#define MPI3_MAN14_FLAGS_AUTH_API_MASK (0x0e)
-#define MPI3_MAN14_FLAGS_AUTH_API_NONE (0x00)
-#define MPI3_MAN14_FLAGS_AUTH_API_CEREBUS (0x02)
-#define MPI3_MAN14_FLAGS_AUTH_API_DMTF_PMCI (0x04)
+#define MPI3_MAN14_NUMSLOTS_MAX (32)
#ifndef MPI3_MAN15_VERSION_RECORD_MAX
#define MPI3_MAN15_VERSION_RECORD_MAX 1
#endif
@@ -775,19 +851,16 @@ struct mpi3_man_page21 {
};
#define MPI3_MAN21_PAGEVERSION (0x00)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_MASK (0x80)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_ENABLED (0x80)
-#define MPI3_MAN21_FLAGS_HOST_METADATA_CAPABILITY_DISABLED (0x00)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x60)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x20)
-#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x40)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x08)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00)
-#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x08)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x01)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00)
-#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x01)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x00000060)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00000000)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x00000020)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x00000040)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x00000008)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00000000)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x00000008)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x00000001)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00000000)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x00000001)
#ifndef MPI3_MAN_PROD_SPECIFIC_MAX
#define MPI3_MAN_PROD_SPECIFIC_MAX (1)
#endif
@@ -808,7 +881,7 @@ struct mpi3_io_unit_page1 {
struct mpi3_config_page_header header;
__le32 flags;
u8 dmd_io_delay;
- u8 dmd_report_pc_ie;
+ u8 dmd_report_pcie;
u8 dmd_report_sata;
u8 dmd_report_sas;
};
@@ -844,26 +917,30 @@ struct mpi3_io_unit_page2 {
#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
struct mpi3_io_unit3_sensor {
__le16 flags;
- __le16 reserved02;
- __le16 threshold[4];
+ u8 threshold_margin;
+ u8 reserved03;
+ __le16 threshold[3];
+ __le16 reserved0a;
__le32 reserved0c;
__le32 reserved10;
__le32 reserved14;
};
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T3_ENABLE (0x0008)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T2_ENABLE (0x0004)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T1_ENABLE (0x0002)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T0_ENABLE (0x0001)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_ACTION_ENABLED (0x0002)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_WARNING_EVENT_ENABLED (0x0001)
#ifndef MPI3_IO_UNIT3_SENSOR_MAX
-#define MPI3_IO_UNIT3_SENSOR_MAX (1)
+#define MPI3_IO_UNIT3_SENSOR_MAX (1)
#endif
struct mpi3_io_unit_page3 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_sensors;
- u8 polling_interval;
- __le16 reserved0e;
+ u8 nominal_poll_interval;
+ u8 warning_poll_interval;
+ u8 reserved0f;
struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX];
};
@@ -873,13 +950,19 @@ struct mpi3_io_unit4_sensor {
__le16 reserved02;
u8 flags;
u8 reserved05[3];
- __le32 reserved08;
+ __le16 istwi_index;
+ u8 channel;
+ u8 reserved0b;
__le32 reserved0c;
};
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK (0xe0)
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT (5)
#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01)
+#define MPI3_IOUNIT4_SENSOR_ISTWI_INDEX_INTERNAL (0xffff)
+#define MPI3_IOUNIT4_SENSOR_CHANNEL_RESERVED (0xff)
#ifndef MPI3_IO_UNIT4_SENSOR_MAX
-#define MPI3_IO_UNIT4_SENSOR_MAX (1)
+#define MPI3_IO_UNIT4_SENSOR_MAX (1)
#endif
struct mpi3_io_unit_page4 {
struct mpi3_config_page_header header;
@@ -906,8 +989,9 @@ struct mpi3_io_unit_page5 {
struct mpi3_io_unit5_spinup_group spinup_group_parameters[4];
__le32 reserved18;
__le32 reserved1c;
- __le32 reserved20;
- u8 reserved24;
+ __le16 device_shutdown;
+ __le16 reserved22;
+ u8 pcie_device_wait_time;
u8 sata_device_wait_time;
u8 spinup_encl_drive_count;
u8 spinup_encl_delay;
@@ -919,6 +1003,27 @@ struct mpi3_io_unit_page5 {
};
#define MPI3_IOUNIT5_PAGEVERSION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NO_ACTION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_ATTACHED (0x01)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_EXPANDER_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SWITCH_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_EXPANDER (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_SWITCH (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK (0x0300)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT (8)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_MASK (0x00c0)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_SHIFT (6)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_MASK (0x0030)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_SHIFT (4)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_MASK (0x0c)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_NOT_SUPPORTED (0x00)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_OS_CONTROLLED (0x04)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_APP_CONTROLLED (0x08)
+#define MPI3_IOUNIT5_FLAGS_SATAPUIS_BLOCKED (0x0c)
#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
@@ -932,12 +1037,6 @@ struct mpi3_io_unit_page6 {
#define MPI3_IOUNIT6_PAGEVERSION (0x00)
#define MPI3_IOUNIT6_FLAGS_ACT_CABLE_PWR_EXC (0x01)
-struct mpi3_io_unit_page7 {
- struct mpi3_config_page_header header;
- __le32 reserved08;
-};
-
-#define MPI3_IOUNIT7_PAGEVERSION (0x00)
#ifndef MPI3_IOUNIT8_DIGEST_MAX
#define MPI3_IOUNIT8_DIGEST_MAX (1)
#endif
@@ -956,7 +1055,8 @@ struct mpi3_io_unit_page8 {
u8 slots_available;
u8 current_key_encryption_algo;
u8 key_digest_hash_algo;
- __le32 reserved10[2];
+ union mpi3_version_union current_svn;
+ __le32 reserved14;
__le32 current_key[128];
union mpi3_iounit8_digest digest[MPI3_IOUNIT8_DIGEST_MAX];
};
@@ -965,6 +1065,7 @@ struct mpi3_io_unit_page8 {
#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
+#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
struct mpi3_io_unit_page9 {
@@ -974,9 +1075,107 @@ struct mpi3_io_unit_page9 {
__le16 reserved0e;
};
-#define MPI3_IOUNIT9_PAGEVERSION (0x00)
-#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x01)
-#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+#define MPI3_IOUNIT9_PAGEVERSION (0x00)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_MASK (0x00000006)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_SHIFT (1)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_NONE (0x00000000)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_RECEPTACLE (0x00000002)
+#define MPI3_IOUNIT9_FLAGS_UBM_ENCLOSURE_ORDER_BACKPLANE_TYPE (0x00000004)
+#define MPI3_IOUNIT9_FLAGS_VDFIRST_ENABLED (0x00000001)
+#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xffff)
+struct mpi3_io_unit_page10 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 reserved09[3];
+ __le32 silicon_id;
+ u8 fw_version_minor;
+ u8 fw_version_major;
+ u8 hw_version_minor;
+ u8 hw_version_major;
+ u8 part_number[16];
+};
+#define MPI3_IOUNIT10_PAGEVERSION (0x00)
+#define MPI3_IOUNIT10_FLAGS_VALID (0x01)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_MASK (0x02)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_FIRST_REGION (0x00)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_SECOND_REGION (0x02)
+#define MPI3_IOUNIT10_FLAGS_PBLP_EXPECTED (0x80)
+#ifndef MPI3_IOUNIT11_PROFILE_MAX
+#define MPI3_IOUNIT11_PROFILE_MAX (1)
+#endif
+struct mpi3_iounit11_profile {
+ u8 profile_identifier;
+ u8 reserved01[3];
+ __le16 max_vds;
+ __le16 max_host_pds;
+ __le16 max_adv_host_pds;
+ __le16 max_raid_pds;
+ __le16 max_nvme;
+ __le16 max_outstanding_requests;
+ __le16 subsystem_id;
+ __le16 reserved12;
+ __le32 reserved14[2];
+};
+struct mpi3_io_unit_page11 {
+ struct mpi3_config_page_header header;
+ __le32 reserved08;
+ u8 num_profiles;
+ u8 current_profile_identifier;
+ __le16 reserved0e;
+ struct mpi3_iounit11_profile profile[MPI3_IOUNIT11_PROFILE_MAX];
+};
+#define MPI3_IOUNIT11_PAGEVERSION (0x00)
+#ifndef MPI3_IOUNIT12_BUCKET_MAX
+#define MPI3_IOUNIT12_BUCKET_MAX (1)
+#endif
+struct mpi3_iounit12_bucket {
+ u8 coalescing_depth;
+ u8 coalescing_timeout;
+ __le16 io_count_low_boundary;
+ __le32 reserved04;
+};
+struct mpi3_io_unit_page12 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c[4];
+ u8 num_buckets;
+ u8 reserved1d[3];
+ struct mpi3_iounit12_bucket bucket[MPI3_IOUNIT12_BUCKET_MAX];
+};
+#define MPI3_IOUNIT12_PAGEVERSION (0x00)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_MASK (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_SHIFT (8)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_8 (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_16 (0x00000100)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_32 (0x00000200)
+#define MPI3_IOUNIT12_FLAGS_NUMPASSES_64 (0x00000300)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_MASK (0x00000003)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_DISABLED (0x00000000)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_500US (0x00000001)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_1MS (0x00000002)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_2MS (0x00000003)
+#ifndef MPI3_IOUNIT13_FUNC_MAX
+#define MPI3_IOUNIT13_FUNC_MAX (1)
+#endif
+struct mpi3_iounit13_allowed_function {
+ __le16 sub_function;
+ u8 function_code;
+ u8 fuction_flags;
+};
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_ADMIN_BLOCKED (0x04)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_OOB_BLOCKED (0x02)
+#define MPI3_IOUNIT13_FUNCTION_FLAGS_CHECK_SUBFUNCTION_ENABLED (0x01)
+struct mpi3_io_unit_page13 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_functions;
+ u8 reserved0d[3];
+ struct mpi3_iounit13_allowed_function allowed_function[MPI3_IOUNIT13_FUNC_MAX];
+};
+#define MPI3_IOUNIT13_PAGEVERSION (0x00)
+#define MPI3_IOUNIT13_FLAGS_ADMIN_BLOCKED (0x0002)
+#define MPI3_IOUNIT13_FLAGS_OOB_BLOCKED (0x0001)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@@ -994,12 +1193,10 @@ struct mpi3_ioc_page1 {
struct mpi3_config_page_header header;
__le32 coalescing_timeout;
u8 coalescing_depth;
- u8 pci_slot_num;
+ u8 obsolete;
__le16 reserved0e;
};
-
#define MPI3_IOC1_PAGEVERSION (0x00)
-#define MPI3_IOC1_PCISLOTNUM_UNKNOWN (0xff)
#ifndef MPI3_IOC2_EVENTMASK_WORDS
#define MPI3_IOC2_EVENTMASK_WORDS (4)
#endif
@@ -1012,7 +1209,52 @@ struct mpi3_ioc_page2 {
};
#define MPI3_IOC2_PAGEVERSION (0x00)
-struct mpi3_uefibsd_page0 {
+#define MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010)
+#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008)
+#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004)
+#define MPI3_DRIVER_FLAGS_OOBADVHOSTPD_BLOCKED (0x0002)
+#define MPI3_DRIVER_FLAGS_OOBHOSTPD_BLOCKED (0x0001)
+struct mpi3_allowed_cmd_scsi {
+ __le16 service_action;
+ u8 operation_code;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_ata {
+ u8 subcommand;
+ u8 reserved01;
+ u8 command;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_nvme {
+ u8 reserved00;
+ u8 nvme_cmd_flags;
+ u8 op_code;
+ u8 command_flags;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3f)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00)
+union mpi3_allowed_cmd {
+ struct mpi3_allowed_cmd_scsi scsi;
+ struct mpi3_allowed_cmd_ata ata;
+ struct mpi3_allowed_cmd_nvme nvme;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_ADMINRAIDPD_BLOCKED (0x20)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDPD_BLOCKED (0x10)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDVD_BLOCKED (0x08)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBADVHOSTPD_BLOCKED (0x04)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBHOSTPD_BLOCKED (0x02)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_CHECKSUBCMD_ENABLED (0x01)
+#ifndef MPI3_ALLOWED_CMDS_MAX
+#define MPI3_ALLOWED_CMDS_MAX (1)
+#endif
+struct mpi3_driver_page0 {
struct mpi3_config_page_header header;
__le32 bsd_options;
u8 ssu_timeout;
@@ -1025,14 +1267,122 @@ struct mpi3_uefibsd_page0 {
__le32 reserved14;
__le32 reserved18;
};
+#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008)
+#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
+struct mpi3_driver_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c;
+ __le16 host_diag_trace_max_size;
+ __le16 host_diag_trace_min_size;
+ __le16 host_diag_trace_decrement_size;
+ __le16 reserved16;
+ __le16 host_diag_fw_max_size;
+ __le16 host_diag_fw_min_size;
+ __le16 host_diag_fw_decrement_size;
+ __le16 reserved1e;
+ __le16 host_diag_driver_max_size;
+ __le16 host_diag_driver_min_size;
+ __le16 host_diag_driver_decrement_size;
+ __le16 reserved26;
+};
+
+#define MPI3_DRIVER1_PAGEVERSION (0x00)
+#ifndef MPI3_DRIVER2_TRIGGER_MAX
+#define MPI3_DRIVER2_TRIGGER_MAX (1)
+#endif
+struct mpi3_driver2_trigger_event {
+ u8 type;
+ u8 flags;
+ u8 reserved02;
+ u8 event;
+ __le32 reserved04[3];
+};
+
+struct mpi3_driver2_trigger_scsi_sense {
+ u8 type;
+ u8 flags;
+ __le16 reserved02;
+ u8 ascq;
+ u8 asc;
+ u8 sense_key;
+ u8 reserved07;
+ __le32 reserved08[2];
+};
-#define MPI3_UEFIBSD_PAGEVERSION (0x00)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_MASK (0x00000003)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_NONE (0x00000002)
-#define MPI3_UEFIBSD_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
-#define MPI3_UEFIBSD_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL (0xff)
+struct mpi3_driver2_trigger_reply {
+ u8 type;
+ u8 flags;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 ioc_log_info_mask;
+ __le32 reserved0c;
+};
+
+#define MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL (0xffff)
+union mpi3_driver2_trigger_element {
+ struct mpi3_driver2_trigger_event event;
+ struct mpi3_driver2_trigger_scsi_sense scsi_sense;
+ struct mpi3_driver2_trigger_reply reply;
+};
+
+#define MPI3_DRIVER2_TRIGGER_TYPE_EVENT (0x00)
+#define MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE (0x01)
+#define MPI3_DRIVER2_TRIGGER_TYPE_REPLY (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01)
+struct mpi3_driver_page2 {
+ struct mpi3_config_page_header header;
+ __le64 master_trigger;
+ __le32 reserved10[3];
+ u8 num_triggers;
+ u8 reserved1d[3];
+ union mpi3_driver2_trigger_element trigger[MPI3_DRIVER2_TRIGGER_MAX];
+};
+
+#define MPI3_DRIVER2_PAGEVERSION (0x00)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_SNAPDUMP (0x2000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL)
+struct mpi3_driver_page10 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER10_PAGEVERSION (0x00)
+struct mpi3_driver_page20 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER20_PAGEVERSION (0x00)
+struct mpi3_driver_page30 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER30_PAGEVERSION (0x00)
union mpi3_security_mac {
__le32 dword[16];
__le16 word[32];
@@ -1102,7 +1452,7 @@ struct mpi3_security1_key_record {
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02)
-#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_AUTH_DEV_KEY (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_DEVICE_KEY (0x03)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04)
struct mpi3_security_page1 {
struct mpi3_config_page_header header;
@@ -1137,16 +1487,30 @@ struct mpi3_sas_io_unit_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_phys;
- u8 reserved0d[3];
+ u8 init_status;
+ __le16 reserved0e;
struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
};
-#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
-#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
-#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
-#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
-#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
-#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_SASIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_SASIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_SASIOUNIT0_INITSTATUS_HOST_PHYS_ENABLED (0x06)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MIN (0xf0)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MAX (0xff)
+#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
struct mpi3_sas_io_unit1_phy_data {
u8 io_unit_port;
u8 port_flags;
@@ -1343,6 +1707,26 @@ struct mpi3_sas_expander_page1 {
#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+#ifndef MPI3_SASEXPANDER2_MAX_NUM_PHYS
+#define MPI3_SASEXPANDER2_MAX_NUM_PHYS (1)
+#endif
+struct mpi3_sasexpander2_phy_element {
+ u8 link_change_count;
+ u8 reserved01;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_sas_expander_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_sasexpander2_phy_element phy[MPI3_SASEXPANDER2_MAX_NUM_PHYS];
+};
+
+#define MPI3_SASEXPANDER2_PAGEVERSION (0x00)
struct mpi3_sas_port_page0 {
struct mpi3_config_page_header header;
u8 port_number;
@@ -1510,6 +1894,14 @@ struct mpi3_sas_phy_page4 {
#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04)
#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05)
#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06)
+#define MPI3_PCIE_ASPM_ENABLE_NONE (0x0)
+#define MPI3_PCIE_ASPM_ENABLE_L0S (0x1)
+#define MPI3_PCIE_ASPM_ENABLE_L1 (0x2)
+#define MPI3_PCIE_ASPM_ENABLE_L0S_L1 (0x3)
+#define MPI3_PCIE_ASPM_SUPPORT_NONE (0x0)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S (0x1)
+#define MPI3_PCIE_ASPM_SUPPORT_L1 (0x2)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S_L1 (0x3)
struct mpi3_pcie_io_unit0_phy_data {
u8 link;
u8 link_flags;
@@ -1540,7 +1932,8 @@ struct mpi3_pcie_io_unit_page0 {
__le32 reserved08;
u8 num_phys;
u8 init_status;
- __le16 reserved0e;
+ u8 aspm;
+ u8 reserved0f;
struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX];
};
@@ -1556,6 +1949,14 @@ struct mpi3_pcie_io_unit_page0 {
#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08)
#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0)
#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK (0xc0)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK (0x30)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_MASK (0x0c)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_SHIFT (2)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK (0x03)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT (0)
struct mpi3_pcie_io_unit1_phy_data {
u8 link;
u8 link_flags;
@@ -1569,16 +1970,16 @@ struct mpi3_pcie_io_unit1_phy_data {
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
-#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
+#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX
-#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
+#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
#endif
struct mpi3_pcie_io_unit_page1 {
struct mpi3_config_page_header header;
@@ -1586,21 +1987,77 @@ struct mpi3_pcie_io_unit_page1 {
__le32 reserved0c;
u8 num_phys;
u8 reserved11;
- __le16 reserved12;
+ u8 aspm;
+ u8 reserved13;
struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX];
};
-#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_MASK (0xe0000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_DEASSERT (0x20000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_ASSERT (0x40000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_BACKPLANE_ERROR (0x60000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_MASK (0x1c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_NONE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_DEASSERT (0x04000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_ASSERT (0x08000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_BACKPLANE_ERROR (0x0c000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x00000080)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x00000040)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x00000030)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x00000010)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x00000020)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0000000f)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_USE_BACKPLANE (0x00000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x00000002)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x00000003)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x00000004)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x00000005)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x00000006)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_SHIFT (0)
struct mpi3_pcie_io_unit_page2 {
struct mpi3_config_page_header header;
- __le16 nv_me_max_queue_depth;
- __le16 reserved0a;
- u8 nv_me_abort_to;
+ __le16 nvme_max_q_dx1;
+ __le16 nvme_max_q_dx2;
+ u8 nvme_abort_to;
u8 reserved0d;
- __le16 reserved0e;
+ __le16 nvme_max_q_dx4;
};
#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ERROR_RECEIVER_ERROR (0)
+#define MPI3_PCIEIOUNIT3_ERROR_RECOVERY (1)
+#define MPI3_PCIEIOUNIT3_ERROR_CORRECTABLE_ERROR_MSG (2)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_DLLP (3)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_TLP (4)
+#define MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX (5)
+struct mpi3_pcie_io_unit3_error {
+ __le16 threshold_count;
+ __le16 reserved02;
+};
+
+struct mpi3_pcie_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ u8 threshold_window;
+ u8 threshold_action;
+ u8 escalation_count;
+ u8 escalation_action;
+ u8 num_errors;
+ u8 reserved0d[3];
+ struct mpi3_pcie_io_unit3_error error[MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX];
+};
+
+#define MPI3_PCIEIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_NO_ACTION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_HOT_RESET (0x01)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_ONLY (0x02)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_NO_ACCESS (0x03)
struct mpi3_pcie_switch_page0 {
struct mpi3_config_page_header header;
u8 io_unit_port;
@@ -1609,7 +2066,7 @@ struct mpi3_pcie_switch_page0 {
__le16 dev_handle;
__le16 parent_dev_handle;
u8 num_ports;
- u8 pc_ie_level;
+ u8 pcie_level;
__le16 reserved12;
__le32 reserved14;
__le32 reserved18;
@@ -1623,7 +2080,8 @@ struct mpi3_pcie_switch_page0 {
struct mpi3_pcie_switch_page1 {
struct mpi3_config_page_header header;
u8 io_unit_port;
- u8 reserved09[3];
+ u8 flags;
+ __le16 reserved0a;
u8 num_ports;
u8 port_num;
__le16 attached_dev_handle;
@@ -1636,15 +2094,43 @@ struct mpi3_pcie_switch_page1 {
};
#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0c)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0)
+#ifndef MPI3_PCIESWITCH2_MAX_NUM_PORTS
+#define MPI3_PCIESWITCH2_MAX_NUM_PORTS (1)
+#endif
+struct mpi3_pcieswitch2_port_element {
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_pcie_switch_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_ports;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_pcieswitch2_port_element port[MPI3_PCIESWITCH2_MAX_NUM_PORTS];
+};
+
+#define MPI3_PCIESWITCH2_PAGEVERSION (0x00)
struct mpi3_pcie_link_page0 {
struct mpi3_config_page_header header;
u8 link;
u8 reserved09[3];
- __le32 correctable_error_count;
- __le16 n_fatal_error_count;
- __le16 reserved12;
- __le16 fatal_error_count;
- __le16 reserved16;
+ __le32 reserved0c;
+ __le32 receiver_error_count;
+ __le32 recovery_count;
+ __le32 corr_error_msg_count;
+ __le32 non_fatal_error_msg_count;
+ __le32 fatal_error_msg_count;
+ __le32 non_fatal_error_count;
+ __le32 fatal_error_count;
+ __le32 bad_dllp_count;
+ __le32 bad_tlp_count;
};
#define MPI3_PCIELINK0_PAGEVERSION (0x00)
@@ -1654,11 +2140,12 @@ struct mpi3_enclosure_page0 {
__le16 flags;
__le16 enclosure_handle;
__le16 num_slots;
- __le16 start_slot;
+ __le16 reserved16;
u8 io_unit_port;
u8 enclosure_level;
__le16 sep_dev_handle;
- __le32 reserved1c;
+ u8 chassis_slot;
+ u8 reserved1d[3];
};
#define MPI3_ENCLOSURE0_PAGEVERSION (0x00)
@@ -1666,6 +2153,7 @@ struct mpi3_enclosure_page0 {
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
+#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
@@ -1686,6 +2174,7 @@ struct mpi3_device0_sas_sata_format {
u8 zone_group;
};
+#define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400)
#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200)
#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100)
#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080)
@@ -1707,10 +2196,11 @@ struct mpi3_device0_pcie_format {
__le32 maximum_data_transfer_size;
__le32 capabilities;
__le16 noiob;
- u8 nv_me_abort_to;
+ u8 nvme_abort_to;
u8 page_size;
__le16 shutdown_latency;
- __le16 reserved16;
+ u8 recovery_info;
+ u8 reserved17;
};
#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10)
@@ -1718,25 +2208,48 @@ struct mpi3_device0_pcie_format {
#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04)
#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02)
#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01)
-#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK (0x0030)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT (4)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK (0x00c0)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0 (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_1 (0x0040)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_2 (0x0080)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_3 (0x00c0)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_EXTRA_LENGTH_SUPPORTED (0x00000020)
#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010)
#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008)
-#define MPI3_DEVICE0_PCIE_CAP_NVME_SGL_ENABLED (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_SGL (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_PRP (0x00000000)
#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002)
#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_MASK (0x000000c0)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xe0)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1f)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_PROTECTION (0x03)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_METADATA_SZ (0x04)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ (0x05)
struct mpi3_device0_vd_format {
u8 vd_state;
u8 raid_level;
__le16 device_info;
__le16 flags;
- __le16 reserved06;
- __le32 reserved08[2];
+ __le16 io_throttle_group;
+ __le16 io_throttle_group_low;
+ __le16 io_throttle_group_high;
+ __le32 reserved0c;
};
-
#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)
#define MPI3_DEVICE0_VD_STATE_DEGRADED (0x02)
@@ -1753,10 +2266,8 @@ struct mpi3_device0_vd_format {
#define MPI3_DEVICE0_VD_DEVICE_INFO_NVME (0x0004)
#define MPI3_DEVICE0_VD_DEVICE_INFO_SATA (0x0002)
#define MPI3_DEVICE0_VD_DEVICE_INFO_SAS (0x0001)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_MASK (0x0003)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_NONE (0x0000)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_HOST (0x0001)
-#define MPI3_DEVICE0_VD_FLAGS_METADATA_MODE_IOC (0x0002)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xf000)
+#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
union mpi3_device0_dev_spec_format {
struct mpi3_device0_sas_sata_format sas_sata_format;
struct mpi3_device0_pcie_format pcie_format;
@@ -1783,6 +2294,8 @@ struct mpi3_device_page0 {
};
#define MPI3_DEVICE0_PAGEVERSION (0x00)
+#define MPI3_DEVICE0_PARENT_INVALID (0xffff)
+#define MPI3_DEVICE0_ENCLOSURE_HANDLE_NO_ENCLOSURE (0x0000)
#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff)
#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff)
#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff)
@@ -1792,9 +2305,13 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03)
#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04)
#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05)
+#define MPI3_DEVICE0_ASTATUS_PREPARE (0x06)
+#define MPI3_DEVICE0_ASTATUS_SAFE_MODE (0x07)
+#define MPI3_DEVICE0_ASTATUS_GENERIC_MAX (0x0f)
#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10)
#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11)
#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12)
+#define MPI3_DEVICE0_ASTATUS_SAS_MAX (0x1f)
#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20)
#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21)
#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22)
@@ -1810,6 +2327,8 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31)
#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32)
#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33)
+#define MPI3_DEVICE0_ASTATUS_PCIE_ECRC_REQUIRED (0x34)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MAX (0x3f)
#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40)
#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41)
#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42)
@@ -1820,19 +2339,28 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47)
#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48)
#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49)
-#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x50)
+#define MPI3_DEVICE0_ASTATUS_NVME_INSUFFICIENT_POWER (0x4a)
+#define MPI3_DEVICE0_ASTATUS_NVME_DOORBELL_STRIDE (0x4b)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEM_PAGE_MIN_SIZE (0x4c)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEMORY_ALLOCATION (0x4d)
+#define MPI3_DEVICE0_ASTATUS_NVME_COMPLETION_TIME (0x4e)
+#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f)
+#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50)
+#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51)
+#define MPI3_DEVICE0_ASTATUS_NVME_TOO_MANY_ERRORS (0x52)
+#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f)
+#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80)
+#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f)
#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
+#define MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED (0x0010)
#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
-#define MPI3_DEVICE0_FLAGS_ATT_METHOD_MASK (0x0006)
-#define MPI3_DEVICE0_FLAGS_ATT_METHOD_NOT_DIR_ATTACHED (0x0000)
-#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002)
#define MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL (0x0004)
+#define MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED (0x0002)
#define MPI3_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
#define MPI3_DEVICE0_QUEUE_DEPTH_NOT_APPLICABLE (0x0000)
struct mpi3_device1_sas_sata_format {
__le32 reserved00;
};
-
struct mpi3_device1_pcie_format {
__le16 vendor_id;
__le16 device_id;
@@ -1870,11 +2398,17 @@ struct mpi3_device_page1 {
struct mpi3_config_page_header header;
__le16 dev_handle;
__le16 reserved0a;
- __le32 reserved0c[12];
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le16 tm_count;
+ __le16 reserved12;
+ __le32 reserved14[10];
u8 reserved3c[3];
u8 device_form;
union mpi3_device1_dev_spec_format device_specific;
};
#define MPI3_DEVICE1_PAGEVERSION (0x00)
+#define MPI3_DEVICE1_COUNTER_MAX (0xfffe)
+#define MPI3_DEVICE1_COUNTER_INVALID (0xffff)
#endif
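The new aspm byte in mpi3_pcie_io_unit_page0 (and page1) packs four two-bit fields selected by the MASK/SHIFT pairs above, each holding one of the shared MPI3_PCIE_ASPM_* encodings. A minimal userspace sketch of decoding it (mask and shift values copied from the MPI3_PCIEIOUNIT0_ASPM_* defines above; the aspm_name helper and the sample byte are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Copied from the MPI3_PCIEIOUNIT0_ASPM_* defines above. */
#define ASPM_SWITCH_STATES_MASK   0xc0
#define ASPM_SWITCH_STATES_SHIFT  6
#define ASPM_DIRECT_STATES_MASK   0x30
#define ASPM_DIRECT_STATES_SHIFT  4
#define ASPM_SWITCH_SUPPORT_MASK  0x0c
#define ASPM_SWITCH_SUPPORT_SHIFT 2
#define ASPM_DIRECT_SUPPORT_MASK  0x03
#define ASPM_DIRECT_SUPPORT_SHIFT 0

/* MPI3_PCIE_ASPM_SUPPORT_* and _ENABLE_* share this 2-bit encoding. */
static const char *aspm_name(uint8_t v)
{
	static const char *names[] = { "none", "L0s", "L1", "L0s+L1" };
	return names[v & 0x3];
}

int main(void)
{
	uint8_t aspm = 0x6d;	/* illustrative sample of the page's aspm byte */

	printf("switch states:  %s\n",
	       aspm_name((aspm & ASPM_SWITCH_STATES_MASK) >> ASPM_SWITCH_STATES_SHIFT));
	printf("direct states:  %s\n",
	       aspm_name((aspm & ASPM_DIRECT_STATES_MASK) >> ASPM_DIRECT_STATES_SHIFT));
	printf("switch support: %s\n",
	       aspm_name((aspm & ASPM_SWITCH_SUPPORT_MASK) >> ASPM_SWITCH_SUPPORT_SHIFT));
	printf("direct support: %s\n",
	       aspm_name((aspm & ASPM_DIRECT_SUPPORT_MASK) >> ASPM_DIRECT_SUPPORT_SHIFT));
	return 0;
}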
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 169e4f9b7b7c..64c58815988a 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2018-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2018-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_IMAGE_H
#define MPI30_IMAGE_H 1
@@ -61,6 +60,11 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_RMC (0x20434d52)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
@@ -94,6 +98,61 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c)
#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c)
#define MPI3_IMAGE_HEADER_SIZE (0x100)
+#ifndef MPI3_CI_MANIFEST_MPI_MAX
+#define MPI3_CI_MANIFEST_MPI_MAX (1)
+#endif
+struct mpi3_ci_manifest_mpi_comp_image_ref {
+ __le32 signature1;
+ __le32 reserved04[3];
+ struct mpi3_comp_image_version component_image_version;
+ __le32 component_image_version_string_offset;
+ __le32 crc;
+};
+
+struct mpi3_ci_manifest_mpi {
+ u8 manifest_type;
+ u8 reserved01[3];
+ __le32 reserved04[3];
+ u8 num_image_references;
+ u8 release_level;
+ __le16 reserved12;
+ __le16 reserved14;
+ __le16 flags;
+ __le32 reserved18[2];
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved28[2];
+ union mpi3_version_union package_security_version;
+ __le32 reserved34;
+ struct mpi3_comp_image_version package_version;
+ __le32 package_version_string_offset;
+ __le32 package_build_date_string_offset;
+ __le32 package_build_time_string_offset;
+ __le32 reserved4c;
+ __le32 diag_authorization_identifier[16];
+ struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX];
+};
+
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60)
+#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01)
+#define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff)
+#define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000)
+union mpi3_ci_manifest {
+ struct mpi3_ci_manifest_mpi mpi;
+ __le32 dword[1];
+};
+
+#define MPI3_CI_MANIFEST_TYPE_MPI (0x00)
struct mpi3_extended_image_header {
u8 image_type;
u8 reserved01[3];
@@ -161,6 +220,7 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA384 (0x03)
#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00)
#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01)
#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02)
@@ -178,7 +238,6 @@ struct mpi3_encrypted_key_with_hash_entry {
u8 reserved03;
__le32 reserved04;
__le32 public_key[MPI3_PUBLIC_KEY_MAX];
- __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
};
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
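The MPI3_IMAGE_HEADER_SIGNATURE1_* values added in this hunk are four-character ASCII codes packed as little-endian dwords; 0x464e414d, for instance, unpacks to "MANF". A small sketch that recovers the text form (dword values copied from the defines above; print_sig is an illustrative helper, not part of the header):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the new MPI3_IMAGE_HEADER_SIGNATURE1_* defines. */
static const uint32_t sigs[] = {
	0x464e414d,	/* MANIFEST */
	0x204d454f,	/* OEM      */
	0x20434d52,	/* RMC      */
	0x204d4d53,	/* SMM      */
	0x20575350,	/* PSW      */
};

/* signature1 is packed little-endian, so the low byte is the first
 * character of the code. */
static void print_sig(uint32_t sig)
{
	char s[5];

	for (int i = 0; i < 4; i++)
		s[i] = (char)((sig >> (8 * i)) & 0xff);
	s[4] = '\0';
	printf("0x%08x -> \"%s\"\n", sig, s);
}

int main(void)
{
	for (unsigned i = 0; i < sizeof(sigs) / sizeof(sigs[0]); i++)
		print_sig(sigs[i]);	/* prints MANF, "OEM ", "RMC ", ... */
	return 0;
}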
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index e02b6d3cfba2..3c03610ecfa6 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -1,19 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_INIT_H
#define MPI30_INIT_H 1
struct mpi3_scsi_io_cdb_eedp32 {
u8 cdb[20];
- __be32 primary_reference_tag;
+ __be32 primary_reference_tag;
__le16 primary_application_tag;
__le16 primary_application_tag_mask;
__le32 transfer_length;
};
-union mpi3_scso_io_cdb_union {
+union mpi3_scsi_io_cdb_union {
u8 cdb32[32];
struct mpi3_scsi_io_cdb_eedp32 eedp32;
struct mpi3_sge_common sge;
@@ -32,11 +31,12 @@ struct mpi3_scsi_io_request {
__le32 skip_count;
__le32 data_length;
u8 lun[8];
- union mpi3_scso_io_cdb_union cdb;
+ union mpi3_scsi_io_cdb_union cdb;
union mpi3_sge_union sgl[4];
};
#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
+#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
@@ -54,6 +54,9 @@ struct mpi3_scsi_io_request {
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
#define MPI3_SCSIIO_METASGL_INDEX (3)
struct mpi3_scsi_io_reply {
__le16 host_tag;
@@ -111,49 +114,4 @@ struct mpi3_scsi_io_reply {
#define MPI3_SCSI_RSP_ARI0_MASK (0xff000000)
#define MPI3_SCSI_RSP_ARI0_SHIFT (24)
#define MPI3_SCSI_TASKTAG_UNKNOWN (0xffff)
-struct mpi3_scsi_task_mgmt_request {
- __le16 host_tag;
- u8 ioc_use_only02;
- u8 function;
- __le16 ioc_use_only04;
- u8 ioc_use_only06;
- u8 msg_flags;
- __le16 change_count;
- __le16 dev_handle;
- __le16 task_host_tag;
- u8 task_type;
- u8 reserved0f;
- __le16 task_request_queue_id;
- __le16 reserved12;
- __le32 reserved14;
- u8 lun[8];
-};
-
-#define MPI3_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x08)
-#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
-#define MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK_SET (0x02)
-#define MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
-#define MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
-#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
-#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
-#define MPI3_SCSITASKMGMT_TASKTYPE_CLEAR_ACA (0x08)
-#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK_SET (0x09)
-#define MPI3_SCSITASKMGMT_TASKTYPE_QUERY_ASYNC_EVENT (0x0a)
-#define MPI3_SCSITASKMGMT_TASKTYPE_I_T_NEXUS_RESET (0x0b)
-struct mpi3_scsi_task_mgmt_reply {
- __le16 host_tag;
- u8 ioc_use_only02;
- u8 function;
- __le16 ioc_use_only04;
- u8 ioc_use_only06;
- u8 msg_flags;
- __le16 ioc_use_only08;
- __le16 ioc_status;
- __le32 ioc_log_info;
- __le32 termination_count;
- __le32 response_data;
- __le32 reserved18;
-};
-
-#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
#endif
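The MPI3_SCSIIO_FLAGS_* CDB-length encodings pair with the 32-byte cdb union in mpi3_scsi_io_request, and the new divert flags mark I/Os routed to firmware. A hedged sketch of composing the flags word (constants copied from the defines above; cdb_size_flag is illustrative, and the LARGE_CDB encoding is deliberately left out of this sketch):

#include <stdint.h>
#include <stdio.h>

/* Copied from the MPI3_SCSIIO_FLAGS_* defines above. */
#define SCSIIO_FLAGS_CDB_16_OR_LESS              0x00000000u
#define SCSIIO_FLAGS_CDB_GREATER_THAN_16         0x20000000u
#define SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING 0x00000010u

static uint32_t cdb_size_flag(unsigned cdb_len)
{
	/* The cdb union is 32 bytes, so CDBs up to 32 bytes fit inline;
	 * anything longer needs the LARGE_CDB encoding (not shown here). */
	return (cdb_len <= 16) ? SCSIIO_FLAGS_CDB_16_OR_LESS
			       : SCSIIO_FLAGS_CDB_GREATER_THAN_16;
}

int main(void)
{
	uint32_t flags = cdb_size_flag(32);

	/* A throttled I/O diverted to firmware would additionally set the
	 * divert reason here and MSGFLAGS_DIVERT_TO_FIRMWARE in msg_flags. */
	flags |= SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
	printf("scsiio flags = 0x%08x\n", flags);
	return 0;
}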
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 1af99a5382d5..1c6c6730df5c 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_IOC_H
#define MPI30_IOC_H 1
@@ -29,20 +28,15 @@ struct mpi3_ioc_init_request {
__le64 driver_information_address;
};
-#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
-#define MPI3_WHOINIT_ROM_BIOS (0x02)
-#define MPI3_WHOINIT_HOST_DRIVER (0x03)
-#define MPI3_WHOINIT_MANUFACTURER (0x04)
-struct mpi3_driver_info_layout {
- __le32 information_length;
- u8 driver_signature[12];
- u8 os_name[16];
- u8 os_version[12];
- u8 driver_name[20];
- u8 driver_version[32];
- u8 driver_release_date[20];
- __le32 driver_capabilities;
-};
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03)
+#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI3_WHOINIT_ROM_BIOS (0x02)
+#define MPI3_WHOINIT_HOST_DRIVER (0x03)
+#define MPI3_WHOINIT_MANUFACTURER (0x04)
struct mpi3_ioc_facts_request {
__le16 host_tag;
@@ -66,7 +60,7 @@ struct mpi3_ioc_facts_data {
u8 ioc_number;
u8 who_init;
__le16 max_msix_vectors;
- __le16 max_outstanding_request;
+ __le16 max_outstanding_requests;
__le16 product_id;
__le16 ioc_request_frame_size;
__le16 reply_frame_size;
@@ -77,17 +71,17 @@ struct mpi3_ioc_facts_data {
u8 sge_modifier_shift;
u8 protocol_flags;
__le16 max_sas_initiators;
- __le16 max_sas_targets;
+ __le16 max_data_length;
__le16 max_sas_expanders;
__le16 max_enclosures;
__le16 min_dev_handle;
__le16 max_dev_handle;
- __le16 max_pc_ie_switches;
+ __le16 max_pcie_switches;
__le16 max_nvme;
- __le16 max_pds;
+ __le16 reserved38;
__le16 max_vds;
__le16 max_host_pds;
- __le16 max_advanced_host_pds;
+ __le16 max_adv_host_pds;
__le16 max_raid_pds;
__le16 max_posted_cmd_buffers;
__le32 flags;
@@ -97,26 +91,47 @@ struct mpi3_ioc_facts_data {
__le16 reserved4e;
__le32 diag_trace_size;
__le32 diag_fw_size;
+ __le32 diag_driver_size;
+ u8 max_host_pd_ns_count;
+ u8 max_adv_host_pd_ns_count;
+ u8 max_raidpd_ns_count;
+ u8 max_devices_per_throttle_group;
+ __le16 io_throttle_data_length;
+ __le16 max_io_throttle_group;
+ __le16 io_throttle_low;
+ __le16 io_throttle_high;
};
-
-#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
+#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020)
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010)
#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_GRAN_MASK (0x00000001)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_IOC_GRAN (0x00000000)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_REPLY_Q_GRAN (0x00000001)
+#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001)
#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00)
#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8)
#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff)
#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_REKEY (0x2000)
+#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_RAID (0x0100)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0200)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_RAID (0x0300)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_MGMT (0x0300)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0500)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_OOB (0x0600)
#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080)
#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040)
#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
@@ -130,6 +145,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_PROTOCOL_NVME (0x0004)
#define MPI3_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
#define MPI3_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED (0x0000)
#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
@@ -140,6 +156,8 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
+#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
+#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000)
struct mpi3_mgmt_passthrough_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -175,6 +193,7 @@ struct mpi3_create_request_queue_request {
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
struct mpi3_delete_request_queue_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -207,9 +226,11 @@ struct mpi3_create_reply_queue_request {
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
struct mpi3_delete_reply_queue_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -235,7 +256,6 @@ struct mpi3_port_enable_request {
#define MPI3_EVENT_LOG_DATA (0x01)
#define MPI3_EVENT_CHANGE (0x02)
#define MPI3_EVENT_GPIO_INTERRUPT (0x04)
-#define MPI3_EVENT_TEMP_THRESHOLD (0x05)
#define MPI3_EVENT_CABLE_MGMT (0x06)
#define MPI3_EVENT_DEVICE_ADDED (0x07)
#define MPI3_EVENT_DEVICE_INFO_CHANGED (0x08)
@@ -255,7 +275,9 @@ struct mpi3_port_enable_request {
#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19)
#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20)
#define MPI3_EVENT_PCIE_ENUMERATION (0x22)
+#define MPI3_EVENT_PCIE_ERROR_THRESHOLD (0x23)
#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40)
+#define MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE (0x50)
#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60)
#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f)
#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4)
@@ -300,21 +322,6 @@ struct mpi3_event_data_gpio_interrupt {
u8 gpio_num;
u8 reserved01[3];
};
-
-struct mpi3_event_data_temp_threshold {
- __le16 status;
- u8 sensor_num;
- u8 reserved03;
- __le16 current_temperature;
- __le16 reserved06;
- __le32 reserved08;
- __le32 reserved0c;
-};
-
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD3_EXCEEDED (0x0008)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD2_EXCEEDED (0x0004)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD1_EXCEEDED (0x0002)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD0_EXCEEDED (0x0001)
struct mpi3_event_data_cable_management {
__le32 active_cable_power_requirement;
u8 status;
@@ -398,8 +405,10 @@ struct mpi3_event_data_sas_discovery {
#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000)
#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000)
#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI3_SAS_DISC_STATUS_INVALID_CEI (0x00010000)
+#define MPI3_SAS_DISC_STATUS_FECEI_MISMATCH (0x00008000)
#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000)
-#define MPI3_SAS_DISC_STATUS_SLOT_COUNT_MISMATCH (0x00002000)
+#define MPI3_SAS_DISC_STATUS_NECEI_MISMATCH (0x00002000)
#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000)
#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800)
#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400)
@@ -581,6 +590,20 @@ struct mpi3_event_data_pcie_topology_change_list {
#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_pcie_error_threshold {
+ __le64 timestamp;
+ u8 reason_code;
+ u8 port;
+ __le16 switch_dev_handle;
+ u8 error;
+ u8 action;
+ __le16 threshold_count;
+ __le16 attached_dev_handle;
+ __le16 reserved12;
+};
+
+#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
+#define MPI3_EVENT_PCI_ERROR_RC_ESCALATION (0x01)
struct mpi3_event_data_sas_init_dev_status_change {
u8 reason_code;
u8 io_unit_port;
@@ -604,6 +627,16 @@ struct mpi3_event_data_hard_reset_received {
__le16 reserved02;
};
+struct mpi3_event_data_diag_buffer_status_change {
+ u8 type;
+ u8 reason_code;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
@@ -645,21 +678,23 @@ struct mpi3_pel_seq {
};
struct mpi3_pel_entry {
+ __le64 time_stamp;
__le32 sequence_number;
- __le32 time_stamp[2];
__le16 log_code;
__le16 arg_type;
__le16 locale;
u8 class;
- u8 reserved13;
+ u8 flags;
u8 ext_num;
u8 num_exts;
u8 arg_data_size;
- u8 fixed_format_size;
+ u8 fixed_format_strings_size;
__le32 reserved18[2];
__le32 pel_info[24];
};
+#define MPI3_PEL_FLAGS_COMPLETE_RESET_NEEDED (0x02)
+#define MPI3_PEL_FLAGS_ACK_NEEDED (0x01)
struct mpi3_pel_list {
__le32 log_count;
__le32 reserved04;
@@ -837,7 +872,10 @@ struct mpi3_pel_req_action_acknowledge {
__le32 reserved10;
};
-#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT (0x01)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
struct mpi3_pel_reply {
__le16 host_tag;
u8 ioc_use_only02;
@@ -885,6 +923,7 @@ struct mpi3_ci_download_request {
#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02)
#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03)
#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04)
+#define MPI3_CI_DOWNLOAD_ACTION_CANCEL_OFFLINE_ACTIVATION (0x05)
struct mpi3_ci_download_reply {
__le16 host_tag;
u8 ioc_use_only02;
@@ -902,6 +941,8 @@ struct mpi3_ci_download_reply {
};
#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_FAILURE (0x40)
+#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
@@ -936,22 +977,34 @@ struct mpi3_ci_upload_request {
#define MPI3_CTRL_OP_LOOKUP_MAPPING (0x02)
#define MPI3_CTRL_OP_UPDATE_TIMESTAMP (0x04)
#define MPI3_CTRL_OP_GET_TIMESTAMP (0x05)
+#define MPI3_CTRL_OP_GET_IOC_CHANGE_COUNT (0x06)
+#define MPI3_CTRL_OP_CHANGE_PROFILE (0x07)
#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
-#define MPI3_CTRL_OP_SAS_SEND_PRIMITIVE (0x20)
-#define MPI3_CTRL_OP_SAS_CLEAR_ERROR_LOG (0x21)
-#define MPI3_CTRL_OP_PCIE_CLEAR_ERROR_LOG (0x22)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13)
+#define MPI3_CTRL_OP_SEND_SAS_PRIMITIVE (0x20)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30)
#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
+#define MPI3_CTRL_OP_CHANGE_PROFILE_PARAM8_PROFILE_ID_INDEX (0x00)
#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
-#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PHY_INDEX (0x00)
-#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
-#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
-#define MPI3_CTRL_OP_SAS_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
-#define MPI3_CTRL_OP_PCIE_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
+#define MPI3_CTRL_OP_SEND_SAS_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_LINK_INDEX (0x01)
#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
@@ -966,9 +1019,15 @@ struct mpi3_ci_upload_request {
#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
+#define MPI3_CTRL_GET_IOC_CHANGE_COUNT_VALUE16_CHANGECOUNT_INDEX (0)
+#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0)
#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06)
+#define MPI3_CTRL_ACTION_NOP (0x00)
+#define MPI3_CTRL_ACTION_LINK_RESET (0x01)
+#define MPI3_CTRL_ACTION_HARD_RESET (0x02)
+#define MPI3_CTRL_ACTION_CLEAR_ERROR_LOG (0x05)
struct mpi3_iounit_control_request {
__le16 host_tag;
u8 ioc_use_only02;
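The reworked MPI3_IOCFACTS_CAPABILITY_* bits above are plain mask tests against the capabilities dword in the IOC facts data. A minimal sketch of decoding the new fields (constants copied from the defines above; the sample capabilities value is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Copied from the MPI3_IOCFACTS_CAPABILITY_* defines above. */
#define CAP_NON_SUPERVISOR_MASK          0x80000000u
#define CAP_INT_COALESCE_MASK            0x00000600u
#define CAP_INT_COALESCE_OUTSTANDING_IO  0x00000200u
#define CAP_COMPLETE_RESET_CAPABLE       0x00000100u
#define CAP_RAID_CAPABLE                 0x00000008u

int main(void)
{
	uint32_t caps = 0x00000308;	/* illustrative capabilities word */

	printf("IOC role: %s\n",
	       (caps & CAP_NON_SUPERVISOR_MASK) ? "non-supervisor" : "supervisor");
	printf("coalescing: %s\n",
	       (caps & CAP_INT_COALESCE_MASK) == CAP_INT_COALESCE_OUTSTANDING_IO ?
	       "outstanding-IO based" : "fixed threshold");
	printf("complete reset capable: %s\n",
	       (caps & CAP_COMPLETE_RESET_CAPABLE) ? "yes" : "no");
	printf("RAID capable: %s\n", (caps & CAP_RAID_CAPABLE) ? "yes" : "no");
	return 0;
}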
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
new file mode 100644
index 000000000000..b7a5df01120d
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_PCI_H
+#define MPI30_PCI_H 1
+
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
+
+#endif
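The new mpi30_pci.h carries only the MPI3_NVME_FLAGS_* encodings so far, presumably consumed by NVMe encapsulated requests defined elsewhere. A small sketch of composing the flags word (constants copied from the defines above; nvme_encap_flags and its pairing with an encapsulated request are assumptions, not part of this header):

#include <stdint.h>

/* Copied from the MPI3_NVME_FLAGS_* defines above. */
#define NVME_FLAGS_SUBMISSIONQ_ADMIN          0x0001u
#define NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL  0x0002u

/* Illustrative helper: select the admin submission queue and optionally
 * force error replies for all commands rather than failures only. */
static uint16_t nvme_encap_flags(int admin_queue, int force_err_reply_all)
{
	uint16_t flags = 0;

	if (admin_queue)
		flags |= NVME_FLAGS_SUBMISSIONQ_ADMIN;
	if (force_err_reply_all)
		flags |= NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL;
	return flags;
}

int main(void)
{
	return nvme_encap_flags(1, 0) == 0x0001 ? 0 : 1;
}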
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
index ba5018702960..e587f77ccd68 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_SAS_H
#define MPI30_SAS_H 1
@@ -30,4 +29,18 @@ struct mpi3_smp_passthrough_request {
struct mpi3_sge_common request_sge;
struct mpi3_sge_common response_sge;
};
+
+struct mpi3_smp_passthrough_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le16 response_data_length;
+ __le16 reserved12;
+};
#endif
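The new mpi3_smp_passthrough_reply mirrors the standard MPI3 reply header and adds a response length. A userspace sketch of a success check (the struct is re-declared with plain uint types, so real code must byte-swap the __le16/__le32 fields on big-endian hosts; masking ioc_status with 0x7fff assumes the log-info-available top bit from the full transport header, which is not shown in this diff):

#include <stdint.h>
#include <stdio.h>

struct smp_passthrough_reply {
	uint16_t host_tag;
	uint8_t  ioc_use_only02;
	uint8_t  function;
	uint16_t ioc_use_only04;
	uint8_t  ioc_use_only06;
	uint8_t  msg_flags;
	uint16_t ioc_use_only08;
	uint16_t ioc_status;
	uint32_t ioc_log_info;
	uint16_t response_data_length;
	uint16_t reserved12;
};

static int smp_reply_ok(const struct smp_passthrough_reply *r)
{
	/* Low 15 bits carry the status code; 0 is success. */
	if ((r->ioc_status & 0x7fff) != 0) {
		fprintf(stderr, "SMP passthrough failed: ioc_status=0x%04x log=0x%08x\n",
			r->ioc_status, r->ioc_log_info);
		return 0;
	}
	return r->response_data_length > 0;
}

int main(void)
{
	struct smp_passthrough_reply r = {
		.ioc_status = 0,
		.response_data_length = 24,	/* illustrative */
	};
	return smp_reply_ok(&r) ? 0 : 1;
}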
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 63e4e81d5397..9b76b9632751 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Copyright 2016-2021 Broadcom Inc. All rights reserved.
- *
+ * Copyright 2016-2022 Broadcom Inc. All rights reserved.
*/
#ifndef MPI30_TRANSPORT_H
#define MPI30_TRANSPORT_H 1
@@ -19,8 +18,9 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (0)
-#define MPI3_VERSION_DEV (18)
+#define MPI3_VERSION_UNIT (26)
+#define MPI3_VERSION_DEV (0)
+#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
__le16 producer_index;
__le16 reserved02;
@@ -74,6 +74,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004)
#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000)
#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24)
+#define MPI3_SYSIF_IOC_INFO_LOW_HCB_DISABLED (0x00000001)
#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014)
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
@@ -82,12 +83,13 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
-#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN (0x00002000)
+#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000)
#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010)
#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001)
#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c)
#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_SHIFT (0x00000002)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008)
@@ -107,9 +109,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
-#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x30000000)
-#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_MASK (0x00ff0000)
-#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_SHIFT (16)
+#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x20000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_MASK (0x01ff0000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_SHIFT (16)
#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00)
#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8)
#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff)
@@ -117,9 +119,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000)
#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004)
#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008)
-#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(n) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(N) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c)
-#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(n) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
@@ -133,7 +135,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
-#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_FLASH_RCVRY_RESET (0x00000200)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700)
#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080)
@@ -153,8 +155,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003)
-#define MPI3_SYSIF_FAULT_CODE_SAFE_MODE_EXIT (0x0000f004)
-#define MPI3_SYSIF_FAULT_CODE_FACTORY_RESET (0x0000f005)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
+#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
+#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
@@ -208,6 +211,7 @@ struct mpi3_default_reply_descriptor {
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS (0x3000)
+#define MPI3_REPLY_DESCRIPT_REQUEST_QUEUE_ID_INVALID (0xffff)
struct mpi3_address_reply_descriptor {
__le64 reply_frame_address;
__le16 request_queue_ci;
@@ -305,7 +309,7 @@ union mpi3_sge_union {
#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
-#define MPI3_SGE_FLAGS_DLAS_IOC_DDR (0x01)
+#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
#define MPI3_SGE_EXT_OPER_EEDP (0x00)
#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000)
@@ -326,7 +330,6 @@ union mpi3_sge_union {
#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
-#define MPI3_EEDPFLAGS_EEDP_OP_NOOP (0x0000)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
@@ -409,6 +412,8 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
+#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
+#define MPI3_IOCSTATUS_SUPERVISOR_ONLY (0x000d)
#define MPI3_IOCSTATUS_FAILURE (0x001f)
#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
@@ -448,8 +453,10 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0)
#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1)
#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2)
-#define MPI3_IOCSTATUS_CI_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_NOT_POSSIBLE (0x00b4)
#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0)
+#define MPI3_IOCSTATUS_SECURITY_VIOLATION (0x00c1)
#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00)
#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01)
#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02)
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 9787b53a2b59..def4c5e15cd8 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2021 Broadcom Inc.
+ * Copyright (C) 2017-2022 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
@@ -38,6 +38,8 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+#include <scsi/scsi_transport_sas.h>
#include "mpi/mpi30_transport.h"
#include "mpi/mpi30_cnfg.h"
@@ -45,15 +47,17 @@
#include "mpi/mpi30_init.h"
#include "mpi/mpi30_ioc.h"
#include "mpi/mpi30_sas.h"
+#include "mpi/mpi30_pci.h"
#include "mpi3mr_debug.h"
/* Global list and lock for storing multiple adapters managed by the driver */
extern spinlock_t mrioc_list_lock;
extern struct list_head mrioc_list;
extern int prot_mask;
+extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "00.255.45.01"
-#define MPI3MR_DRIVER_RELDATE "12-December-2020"
+#define MPI3MR_DRIVER_VERSION "8.2.0.3.0"
+#define MPI3MR_DRIVER_RELDATE "08-September-2022"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -63,12 +67,14 @@ extern int prot_mask;
#define MPI3MR_NAME_LENGTH 32
#define IOCNAME "%s: "
+#define MPI3MR_MAX_SECTORS 2048
+
/* Definitions for internal SGL and Chain SGL buffers */
#define MPI3MR_PAGE_SIZE_4K 4096
#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
/* Definitions for MAX values for shost */
-#define MPI3MR_MAX_CMDS_LUN 7
+#define MPI3MR_MAX_CMDS_LUN 128
#define MPI3MR_MAX_CDB_LENGTH 32
/* Admin queue management definitions */
@@ -79,7 +85,8 @@ extern int prot_mask;
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
-#define MPI3MR_OP_REP_Q_QD 4096
+#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
#define MPI3MR_MAX_SEG_LIST_SIZE 4096
@@ -87,31 +94,49 @@ extern int prot_mask;
/* Reserved Host Tag definitions */
#define MPI3MR_HOSTTAG_INVALID 0xFFFF
#define MPI3MR_HOSTTAG_INITCMDS 1
-#define MPI3MR_HOSTTAG_IOCTLCMDS 2
+#define MPI3MR_HOSTTAG_BSG_CMDS 2
+#define MPI3MR_HOSTTAG_PEL_ABORT 3
+#define MPI3MR_HOSTTAG_PEL_WAIT 4
#define MPI3MR_HOSTTAG_BLK_TMS 5
+#define MPI3MR_HOSTTAG_CFG_CMDS 6
+#define MPI3MR_HOSTTAG_TRANSPORT_CMDS 7
-#define MPI3MR_NUM_DEVRMCMD 1
-#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1)
+#define MPI3MR_NUM_DEVRMCMD 16
+#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_TRANSPORT_CMDS + 1)
#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
MPI3MR_NUM_DEVRMCMD - 1)
-#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+#define MPI3MR_NUM_EVTACKCMD 4
+#define MPI3MR_HOSTTAG_EVTACKCMD_MIN (MPI3MR_HOSTTAG_DEVRMCMD_MAX + 1)
+#define MPI3MR_HOSTTAG_EVTACKCMD_MAX (MPI3MR_HOSTTAG_EVTACKCMD_MIN + \
+ MPI3MR_NUM_EVTACKCMD - 1)
/* Reduced resource count definition for crash kernel */
#define MPI3MR_HOST_IOS_KDUMP 128
/* command/controller interaction timeout definitions in seconds */
-#define MPI3MR_INTADMCMD_TIMEOUT 10
+#define MPI3MR_INTADMCMD_TIMEOUT 60
#define MPI3MR_PORTENABLE_TIMEOUT 300
-#define MPI3MR_ABORTTM_TIMEOUT 30
-#define MPI3MR_RESETTM_TIMEOUT 30
+#define MPI3MR_PORTENABLE_POLL_INTERVAL 5
+#define MPI3MR_ABORTTM_TIMEOUT 60
+#define MPI3MR_RESETTM_TIMEOUT 60
#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
#define MPI3MR_TSUPDATE_INTERVAL 900
#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120
#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
+#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180
+#define MPI3MR_RESET_ACK_TIMEOUT 30
#define MPI3MR_WATCHDOG_INTERVAL 1000 /* in milli seconds */
+#define MPI3MR_DEFAULT_CFG_PAGE_SZ 1024 /* in bytes */
+
+#define MPI3MR_RESET_TOPOLOGY_SETTLE_TIME 10
+
+#define MPI3MR_SCMD_TIMEOUT (60 * HZ)
+#define MPI3MR_EH_SCMD_TIMEOUT (60 * HZ)
+
/* Internal admin command state definitions*/
#define MPI3MR_CMD_NOTUSED 0x8000
#define MPI3MR_CMD_COMPLETE 0x0001
@@ -121,7 +146,7 @@ extern int prot_mask;
/* Definitions for Event replies and sense buffer allocated per controller */
#define MPI3MR_NUM_EVT_REPLIES 64
-#define MPI3MR_SENSEBUF_SZ 256
+#define MPI3MR_SENSE_BUF_SZ 256
#define MPI3MR_SENSEBUF_FACTOR 3
#define MPI3MR_CHAINBUF_FACTOR 3
#define MPI3MR_CHAINBUFDIX_FACTOR 2
@@ -135,19 +160,15 @@ extern int prot_mask;
/* ResponseCode definitions */
#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
-#define MPI3MR_RSP_TM_COMPLETE 0x00
-#define MPI3MR_RSP_INVALID_FRAME 0x02
-#define MPI3MR_RSP_TM_NOT_SUPPORTED 0x04
-#define MPI3MR_RSP_TM_FAILED 0x05
-#define MPI3MR_RSP_TM_SUCCEEDED 0x08
-#define MPI3MR_RSP_TM_INVALID_LUN 0x09
-#define MPI3MR_RSP_TM_OVERLAPPED_TAG 0x0A
#define MPI3MR_RSP_IO_QUEUED_ON_IOC \
MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC
#define MPI3MR_DEFAULT_MDTS (128 * 1024)
+#define MPI3MR_DEFAULT_PGSZEXP (12)
+
/* Command retry count definitions */
#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
+#define MPI3MR_PEL_RETRY_COUNT 3
/* Default target device queue depth */
#define MPI3MR_DEFAULT_SDEV_QD 32
@@ -173,6 +194,57 @@ extern int prot_mask;
/* MSI Index from Reply Queue Index */
#define REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, offset) (qidx + offset)
+/*
+ * Maximum data transfer size definitions for management
+ * application commands
+ */
+#define MPI3MR_MAX_APP_XFER_SIZE (1 * 1024 * 1024)
+#define MPI3MR_MAX_APP_XFER_SEGMENTS 512
+/*
+ * 2048 sectors are reserved for data buffers and an additional
+ * 512 sectors for other buffers
+ */
+#define MPI3MR_MAX_APP_XFER_SECTORS (2048 + 512)
+
+/**
+ * struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
+ * Encapsulated commands.
+ *
+ * @base_addr: Physical address
+ * @length: SGE length
+ * @rsvd: Reserved
+ * @rsvd1: Reserved
+ * @sgl_type: sgl type
+ */
+struct mpi3mr_nvme_pt_sge {
+ u64 base_addr;
+ u32 length;
+ u16 rsvd;
+ u8 rsvd1;
+ u8 sgl_type;
+};
+
+/**
+ * struct mpi3mr_buf_map - local structure to
+ * track kernel and user buffers associated with a BSG
+ * structure.
+ *
+ * @bsg_buf: BSG buffer virtual address
+ * @bsg_buf_len: BSG buffer length
+ * @kern_buf: Kernel buffer virtual address
+ * @kern_buf_len: Kernel buffer length
+ * @kern_buf_dma: Kernel buffer DMA address
+ * @data_dir: Data direction.
+ */
+struct mpi3mr_buf_map {
+ void *bsg_buf;
+ u32 bsg_buf_len;
+ void *kern_buf;
+ u32 kern_buf_len;
+ dma_addr_t kern_buf_dma;
+ u8 data_dir;
+};
+
/* IOC State definitions */
enum mpi3mr_iocstate {
MRIOC_STATE_READY = 1,
@@ -183,28 +255,14 @@ enum mpi3mr_iocstate {
MRIOC_STATE_UNRECOVERABLE,
};
-/* Init type definitions */
-enum mpi3mr_init_type {
- MPI3MR_IT_INIT = 0,
- MPI3MR_IT_RESET,
- MPI3MR_IT_RESUME,
-};
-
-/* Cleanup reason definitions */
-enum mpi3mr_cleanup_reason {
- MPI3MR_COMPLETE_CLEANUP = 0,
- MPI3MR_REINIT_FAILURE,
- MPI3MR_SUSPEND,
-};
-
/* Reset reason code definitions*/
enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_BRINGUP = 1,
MPI3MR_RESET_FROM_FAULT_WATCH = 2,
- MPI3MR_RESET_FROM_IOCTL = 3,
+ MPI3MR_RESET_FROM_APP = 3,
MPI3MR_RESET_FROM_EH_HOS = 4,
MPI3MR_RESET_FROM_TM_TIMEOUT = 5,
- MPI3MR_RESET_FROM_IOCTL_TIMEOUT = 6,
+ MPI3MR_RESET_FROM_APP_TIMEOUT = 6,
MPI3MR_RESET_FROM_MUR_FAILURE = 7,
MPI3MR_RESET_FROM_CTLR_CLEANUP = 8,
MPI3MR_RESET_FROM_CIACTIV_FAULT = 9,
@@ -222,7 +280,16 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21,
MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
MPI3MR_RESET_FROM_SYSFS = 23,
- MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24
+ MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
+ MPI3MR_RESET_FROM_FIRMWARE = 27,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT = 30,
+};
+
+/* Queue type definitions */
+enum queue_type {
+ MPI3MR_DEFAULT_QUEUE = 0,
+ MPI3MR_POLL_QUEUE,
};
/**
@@ -263,7 +330,7 @@ struct mpi3mr_ioc_facts {
u16 max_vds;
u16 max_hpds;
u16 max_advhpds;
- u16 max_raidpds;
+ u16 max_raid_pds;
u16 min_devhandle;
u16 max_devhandle;
u16 max_op_req_q;
@@ -278,6 +345,12 @@ struct mpi3mr_ioc_facts {
u8 sge_mod_mask;
u8 sge_mod_value;
u8 sge_mod_shift;
+ u8 max_dev_per_tg;
+ u16 max_io_throttle_group;
+ u16 io_throttle_data_length;
+ u16 io_throttle_low;
+ u16 io_throttle_high;
};
/**
@@ -336,6 +409,7 @@ struct op_req_qinfo {
* @pend_ios: Number of IOs pending in HW for this queue
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
+ * @qtype: Type of queue (types defined in enum queue_type)
*/
struct op_reply_qinfo {
u16 ci;
@@ -350,33 +424,183 @@ struct op_reply_qinfo {
atomic_t pend_ios;
bool enable_irq_poll;
atomic_t in_use;
+ enum queue_type qtype;
};
/**
* struct mpi3mr_intr_info - Interrupt cookie information
*
* @mrioc: Adapter instance reference
+ * @os_irq: irq number
* @msix_index: MSIx index
* @op_reply_q: Associated operational reply queue
* @name: Dev name for the irq claiming device
*/
struct mpi3mr_intr_info {
struct mpi3mr_ioc *mrioc;
+ int os_irq;
u16 msix_index;
struct op_reply_qinfo *op_reply_q;
char name[MPI3MR_NAME_LENGTH];
};
/**
+ * struct mpi3mr_throttle_group_info - Throttle group info
+ *
+ * @io_divert: Flag indicates io divert is on or off for the TG
+ * @need_qd_reduction: Flag to indicate QD reduction is needed
+ * @qd_reduction: Queue Depth reduction in units of 10%
+ * @fw_qd: QueueDepth value reported by the firmware
+ * @modified_qd: Modified QueueDepth value due to throttling
+ * @id: Throttle Group ID.
+ * @high: High limit to turn on throttling in 512 byte blocks
+ * @low: Low limit to turn off throttling in 512 byte blocks
+ * @pend_large_data_sz: Counter to track pending large data
+ */
+struct mpi3mr_throttle_group_info {
+ u8 io_divert;
+ u8 need_qd_reduction;
+ u8 qd_reduction;
+ u16 fw_qd;
+ u16 modified_qd;
+ u16 id;
+ u32 high;
+ u32 low;
+ atomic_t pend_large_data_sz;
+};
+
+/* HBA port flags */
+#define MPI3MR_HBA_PORT_FLAG_DIRTY 0x01
+
+/**
+ * struct mpi3mr_hba_port - HBA's port information
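+ * @list: List head to add this port to the controller's HBA port list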
+ * @port_id: Port number
+ * @flags: HBA port flags
+ */
+struct mpi3mr_hba_port {
+ struct list_head list;
+ u8 port_id;
+ u8 flags;
+};
+
+/**
+ * struct mpi3mr_sas_port - Internal SAS port information
+ * @port_list: List of ports belonging to a SAS node
+ * @num_phys: Number of phys associated with port
+ * @marked_responding: used while refreshing the sas ports
+ * @lowest_phy: lowest phy ID of current sas port
+ * @phy_mask: phy_mask of current sas port
+ * @hba_port: HBA port entry
+ * @remote_identify: Attached device identification
+ * @rphy: SAS transport layer rphy object
+ * @port: SAS transport layer port object
+ * @phy_list: mpi3mr_sas_phy objects belonging to this port
+ */
+struct mpi3mr_sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ u8 marked_responding;
+ int lowest_phy;
+ u32 phy_mask;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct mpi3mr_sas_phy - Internal SAS Phy information
+ * @port_siblings: List of phys belonging to a port
+ * @identify: Phy identification
+ * @remote_identify: Attached device identification
+ * @phy: SAS transport layer Phy object
+ * @phy_id: Unique phy id within a port
+ * @handle: Firmware device handle for this phy
+ * @attached_handle: Firmware device handle for attached device
+ * @phy_belongs_to_port: Flag to indicate phy belongs to port
+ * @hba_port: HBA port entry
+ */
+struct mpi3mr_sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+ struct mpi3mr_hba_port *hba_port;
+};
+
+/**
+ * struct mpi3mr_sas_node - SAS host/expander information
+ * @list: List of sas nodes in a controller
+ * @parent_dev: Parent device class
+ * @num_phys: Number of phys belonging to sas_node
+ * @sas_address: SAS address of sas_node
+ * @handle: Firmware device handle for this sas_host/expander
+ * @sas_address_parent: SAS address of parent expander or host
+ * @enclosure_handle: Firmware handle of enclosure of this node
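+ * @enclosure_logical_id: Enclosure logical identifier of this node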
+ * @device_info: Capabilities of this sas_host/expander
+ * @non_responding: used to refresh the expander devices during reset
+ * @host_node: Flag to indicate this is a host_node
+ * @hba_port: HBA port entry
+ * @phy: A list of phys that make up this sas_host/expander
+ * @sas_port_list: List of internal ports of this node
+ * @rphy: sas_rphy object of this expander node
+ */
+struct mpi3mr_sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 non_responding;
+ u8 host_node;
+ struct mpi3mr_hba_port *hba_port;
+ struct mpi3mr_sas_phy *phy;
+ struct list_head sas_port_list;
+ struct sas_rphy *rphy;
+};
+
+/**
+ * struct mpi3mr_enclosure_node - enclosure information
+ * @list: List of enclosures
+ * @pg0: Enclosure page 0;
+ */
+struct mpi3mr_enclosure_node {
+ struct list_head list;
+ struct mpi3_enclosure_page0 pg0;
+};
+
+/**
* struct tgt_dev_sas_sata - SAS/SATA device specific
* information cached from firmware given data
*
* @sas_address: World wide unique SAS address
+ * @sas_address_parent: Sas address of parent expander or host
* @dev_info: Device information bits
+ * @phy_id: Phy identifier provided in device page 0
+ * @attached_phy_id: Attached phy identifier provided in device page 0
+ * @sas_transport_attached: Is this device exposed to transport
+ * @pend_sas_rphy_add: Flag to check device is in process of add
+ * @hba_port: HBA port entry
+ * @rphy: SAS transport layer rphy object
*/
struct tgt_dev_sas_sata {
u64 sas_address;
+ u64 sas_address_parent;
u16 dev_info;
+ u8 phy_id;
+ u8 attached_phy_id;
+ u8 sas_transport_attached;
+ u8 pend_sas_rphy_add;
+ struct mpi3mr_hba_port *hba_port;
+ struct sas_rphy *rphy;
};
/**
@@ -388,6 +612,7 @@ struct tgt_dev_sas_sata {
* @pgsz: Device page size
* @abort_to: Timeout for abort TM
* @reset_to: Timeout for Target/LUN reset TM
+ * @dev_info: Device information bits
*/
struct tgt_dev_pcie {
u32 mdts;
@@ -395,25 +620,37 @@ struct tgt_dev_pcie {
u8 pgsz;
u8 abort_to;
u8 reset_to;
+ u16 dev_info;
};
/**
- * struct tgt_dev_volume - virtual device specific information
+ * struct tgt_dev_vd - virtual device specific information
* cached from firmware given data
*
* @state: State of the VD
+ * @tg_qd_reduction: Queue Depth reduction in units of 10%
+ * @tg_id: VDs throttle group ID
+ * @tg_high: High limit to turn on throttling in 512 byte blocks
+ * @tg_low: Low limit to turn off throttling in 512 byte blocks
+ * @tg: Pointer to throttle group info
*/
-struct tgt_dev_volume {
+struct tgt_dev_vd {
u8 state;
+ u8 tg_qd_reduction;
+ u16 tg_id;
+ u32 tg_high;
+ u32 tg_low;
+ struct mpi3mr_throttle_group_info *tg;
};
+
/**
* union _form_spec_inf - union of device specific information
*/
union _form_spec_inf {
struct tgt_dev_sas_sata sas_sata_inf;
struct tgt_dev_pcie pcie_inf;
- struct tgt_dev_volume vol_inf;
+ struct tgt_dev_vd vd_inf;
};
@@ -428,11 +665,16 @@ union _form_spec_inf {
* @slot: Slot number
* @encl_handle: FW enclosure handle
* @perst_id: FW assigned Persistent ID
+ * @devpg0_flag: Device Page0 flag
* @dev_type: SAS/SATA/PCIE device type
* @is_hidden: Should be exposed to upper layers or not
* @host_exposed: Already exposed to host or not
+ * @io_unit_port: IO Unit port ID
+ * @non_stl: Is this device not to be attached with SAS TL
+ * @io_throttle_enabled: I/O throttling needed or not
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
+ * @enclosure_logical_id: Enclosure logical identifier
* @dev_spec: Device type specific information
* @ref_count: Reference count
*/
@@ -444,11 +686,16 @@ struct mpi3mr_tgt_dev {
u16 slot;
u16 encl_handle;
u16 perst_id;
+ u16 devpg0_flag;
u8 dev_type;
u8 is_hidden;
u8 host_exposed;
+ u8 io_unit_port;
+ u8 non_stl;
+ u8 io_throttle_enabled;
u16 q_depth;
u64 wwid;
+ u64 enclosure_logical_id;
union _form_spec_inf dev_spec;
struct kref ref_count;
};
@@ -498,7 +745,12 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removed: Device removed in the Firmware
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
+ * @io_throttle_enabled: I/O throttling needed or not
+ * @io_divert: Flag indicates io divert is on or off for the dev
+ * @throttle_group: Pointer to throttle group info
* @tgt_dev: Internal target device pointer
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
*/
struct mpi3mr_stgt_priv_data {
struct scsi_target *starget;
@@ -509,7 +761,11 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removed;
u8 dev_removedelay;
u8 dev_type;
+ u8 io_throttle_enabled;
+ u8 io_divert;
+ struct mpi3mr_throttle_group_info *throttle_group;
struct mpi3mr_tgt_dev *tgt_dev;
+ u32 pend_count;
};
/**
@@ -518,11 +774,14 @@ struct mpi3mr_stgt_priv_data {
* @tgt_priv_data: Scsi_target private data pointer
* @lun_id: LUN ID of the device
* @ncq_prio_enable: NCQ priority enable for SATA device
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
*/
struct mpi3mr_sdev_priv_data {
struct mpi3mr_stgt_priv_data *tgt_priv_data;
u32 lun_id;
u8 ncq_prio_enable;
+ u32 pend_count;
};
/**
@@ -538,6 +797,7 @@ struct mpi3mr_sdev_priv_data {
* @ioc_status: IOC status from the firmware
* @ioc_loginfo:IOC log info from the firmware
* @is_waiting: Is the command issued in block mode
+ * @is_sense: Is Sense data present
* @retry_count: Retry count for retriable commands
* @host_tag: Host tag used by the command
* @callback: Callback for non blocking commands
@@ -553,6 +813,7 @@ struct mpi3mr_drv_cmd {
u16 ioc_status;
u32 ioc_loginfo;
u8 is_waiting;
+ u8 is_sense;
u8 retry_count;
u16 host_tag;
@@ -560,6 +821,21 @@ struct mpi3mr_drv_cmd {
struct mpi3mr_drv_cmd *drv_cmd);
};
+/**
+ * struct dma_memory_desc - memory descriptor structure to store
+ * virtual address, dma address and size for any generic dma
+ * memory allocations in the driver.
+ *
+ * @size: buffer size
+ * @addr: virtual address
+ * @dma_addr: dma address
+ */
+struct dma_memory_desc {
+ u32 size;
+ void *addr;
+ dma_addr_t dma_addr;
+};
+
/**
* struct chain_element - memory descriptor structure to store
@@ -630,12 +906,14 @@ struct scmd_priv {
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
+ * @is_intr_info_set: Flag to indicate intr info is setup
* @num_queues: Number of operational queues
* @num_op_req_q: Number of operational request queues
* @req_qinfo: Operational request queue info pointer
* @num_op_reply_q: Number of operational reply queues
* @op_reply_qinfo: Operational reply queue info pointer
* @init_cmds: Command tracker for initialization commands
+ * @cfg_cmds: Command tracker for configuration requests
* @facts: Cached IOC facts data
* @op_reply_desc_sz: Operational reply descriptor size
* @num_reply_bufs: Number of reply buffers allocated
@@ -672,6 +950,7 @@ struct scmd_priv {
* @scan_started: Async scan started
* @scan_failed: Async scan failed
* @stop_drv_processing: Stop all command processing
+ * @device_refresh_on: Don't process the events until devices are refreshed
* @max_host_ios: Maximum host I/O count
* @chain_buf_count: Chain buffer count
* @chain_buf_pool: Chain buffer pool
@@ -679,26 +958,72 @@ struct scmd_priv {
* @chain_bitmap_sz: Chain buffer allocator bitmap size
* @chain_bitmap: Chain buffer allocator bitmap
* @chain_buf_lock: Chain buffer list lock
+ * @bsg_cmds: Command tracker for BSG command
* @host_tm_cmds: Command tracker for task management commands
* @dev_rmhs_cmds: Command tracker for device removal commands
+ * @evtack_cmds: Command tracker for event ack commands
* @devrem_bitmap_sz: Device removal bitmap size
* @devrem_bitmap: Device removal bitmap
* @dev_handle_bitmap_sz: Device handle bitmap size
* @removepend_bitmap: Remove pending bitmap
* @delayed_rmhs_list: Delayed device removal list
+ * @evtack_cmds_bitmap_sz: Event Ack bitmap size
+ * @evtack_cmds_bitmap: Event Ack bitmap
+ * @delayed_evtack_cmds_list: Delayed event acknowledgment list
* @ts_update_counter: Timestamp update counter
- * @fault_dbg: Fault debug flag
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
+ * @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
+ * @prepare_for_reset: Prepare for reset event received
+ * @prepare_for_reset_timeout_counter: Prepare for reset timeout
+ * @prp_list_virt: NVMe encapsulated PRP list virtual base
+ * @prp_list_dma: NVMe encapsulated PRP list DMA
+ * @prp_sz: NVME encapsulated PRP list size
* @diagsave_timeout: Diagnostic information save timeout
* @logging_level: Controller debug logging level
* @flush_io_count: I/O count to flush after reset
* @current_event: Firmware event currently in process
* @driver_info: Driver, Kernel, OS information to firmware
* @change_count: Topology change count
+ * @pel_enabled: Persistent Event Log(PEL) enabled or not
+ * @pel_abort_requested: PEL abort is requested or not
+ * @pel_class: PEL Class identifier
+ * @pel_locale: PEL Locale identifier
+ * @pel_cmds: Command tracker for PEL wait command
+ * @pel_abort_cmd: Command tracker for PEL abort command
+ * @pel_newest_seqnum: Newest PEL sequence number
+ * @pel_seqnum_virt: PEL sequence number virtual address
+ * @pel_seqnum_dma: PEL sequence number DMA address
+ * @pel_seqnum_sz: PEL sequence number size
* @op_reply_q_offset: Operational reply queue offset with MSIx
+ * @default_qcount: Total Default queues
+ * @active_poll_qcount: Currently active poll queue count
+ * @requested_poll_qcount: User requested poll queue count
+ * @bsg_dev: BSG device structure
+ * @bsg_queue: Request queue for BSG device
+ * @stop_bsgs: Stop BSG request flag
+ * @logdata_buf: Circular buffer to store log data entries
+ * @logdata_buf_idx: Index of entry in buffer to store
+ * @logdata_entry_sz: log data entry size
+ * @pend_large_data_sz: Counter to track pending large data
+ * @io_throttle_data_length: I/O size to track in 512b blocks
+ * @io_throttle_high: I/O size to start throttle in 512b blocks
+ * @io_throttle_low: I/O size to stop throttle in 512b blocks
+ * @num_io_throttle_group: Maximum number of throttle groups
+ * @throttle_groups: Pointer to throttle group info structures
+ * @cfg_page: Default memory for configuration pages
+ * @cfg_page_dma: Configuration page DMA address
+ * @cfg_page_sz: Default configuration page memory size
+ * @sas_transport_enabled: SAS transport enabled or not
+ * @scsi_device_channel: Channel ID for SCSI devices
+ * @transport_cmds: Command tracker for SAS transport commands
+ * @sas_hba: SAS node for the controller
+ * @sas_expander_list: SAS node list of expanders
+ * @sas_node_lock: Lock to protect SAS node list
+ * @hba_port_table_list: List of HBA Ports
+ * @enclosure_list: List of Enclosure objects
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -739,6 +1064,7 @@ struct mpi3mr_ioc {
struct mpi3mr_intr_info *intr_info;
u16 intr_info_count;
+ bool is_intr_info_set;
u16 num_queues;
u16 num_op_req_q;
@@ -748,6 +1074,7 @@ struct mpi3mr_ioc {
struct op_reply_qinfo *op_reply_qinfo;
struct mpi3mr_drv_cmd init_cmds;
+ struct mpi3mr_drv_cmd cfg_cmds;
struct mpi3mr_ioc_facts facts;
u16 op_reply_desc_sz;
@@ -758,6 +1085,7 @@ struct mpi3mr_ioc {
dma_addr_t reply_buf_dma_max_address;
u16 reply_free_qsz;
+ u16 reply_sz;
struct dma_pool *reply_free_q_pool;
__le64 *reply_free_q;
dma_addr_t reply_free_q_dma;
@@ -791,6 +1119,7 @@ struct mpi3mr_ioc {
u8 scan_started;
u16 scan_failed;
u8 stop_drv_processing;
+ u8 device_refresh_on;
u16 max_host_ios;
spinlock_t tgtdev_lock;
@@ -803,21 +1132,33 @@ struct mpi3mr_ioc {
void *chain_bitmap;
spinlock_t chain_buf_lock;
+ struct mpi3mr_drv_cmd bsg_cmds;
struct mpi3mr_drv_cmd host_tm_cmds;
struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
u16 devrem_bitmap_sz;
void *devrem_bitmap;
u16 dev_handle_bitmap_sz;
void *removepend_bitmap;
struct list_head delayed_rmhs_list;
+ u16 evtack_cmds_bitmap_sz;
+ void *evtack_cmds_bitmap;
+ struct list_head delayed_evtack_cmds_list;
u32 ts_update_counter;
- u8 fault_dbg;
u8 reset_in_progress;
u8 unrecoverable;
+ int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
+ u8 prepare_for_reset;
+ u16 prepare_for_reset_timeout_counter;
+
+ void *prp_list_virt;
+ dma_addr_t prp_list_dma;
+ u32 prp_sz;
+
u16 diagsave_timeout;
int logging_level;
u16 flush_io_count;
@@ -825,7 +1166,50 @@ struct mpi3mr_ioc {
struct mpi3mr_fwevt *current_event;
struct mpi3_driver_info_layout driver_info;
u16 change_count;
+
+ u8 pel_enabled;
+ u8 pel_abort_requested;
+ u8 pel_class;
+ u16 pel_locale;
+ struct mpi3mr_drv_cmd pel_cmds;
+ struct mpi3mr_drv_cmd pel_abort_cmd;
+
+ u32 pel_newest_seqnum;
+ void *pel_seqnum_virt;
+ dma_addr_t pel_seqnum_dma;
+ u32 pel_seqnum_sz;
+
u16 op_reply_q_offset;
+ u16 default_qcount;
+ u16 active_poll_qcount;
+ u16 requested_poll_qcount;
+
+ struct device bsg_dev;
+ struct request_queue *bsg_queue;
+ u8 stop_bsgs;
+ u8 *logdata_buf;
+ u16 logdata_buf_idx;
+ u16 logdata_entry_sz;
+
+ atomic_t pend_large_data_sz;
+ u32 io_throttle_data_length;
+ u32 io_throttle_high;
+ u32 io_throttle_low;
+ u16 num_io_throttle_group;
+ struct mpi3mr_throttle_group_info *throttle_groups;
+
+ void *cfg_page;
+ dma_addr_t cfg_page_dma;
+ u16 cfg_page_sz;
+
+ u8 sas_transport_enabled;
+ u8 scsi_device_channel;
+ struct mpi3mr_drv_cmd transport_cmds;
+ struct mpi3mr_sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head hba_port_table_list;
+ struct list_head enclosure_list;
};
/**
@@ -838,6 +1222,9 @@ struct mpi3mr_ioc {
* @send_ack: Event acknowledgment required or not
* @process_evt: Bottomhalf processing required or not
* @evt_ctx: Event context to send in Ack
+ * @event_data_size: size of the event data in bytes
+ * @pending_at_sml: waiting for device add/remove API to complete
+ * @discard: discard this event
* @ref_count: kref count
* @event_data: Actual MPI3 event data
*/
@@ -849,8 +1236,11 @@ struct mpi3mr_fwevt {
bool send_ack;
bool process_evt;
u32 evt_ctx;
+ u16 event_data_size;
+ bool pending_at_sml;
+ bool discard;
struct kref ref_count;
- char event_data[0] __aligned(4);
+ char event_data[] __aligned(4);
};
@@ -867,10 +1257,23 @@ struct delayed_dev_rmhs_node {
u8 iou_rc;
};
+/**
+ * struct delayed_evt_ack_node - Delayed event ack node
+ * @list: list head
+ * @event: MPI3 event ID
+ * @event_ctx: event context
+ */
+struct delayed_evt_ack_node {
+ struct list_head list;
+ u8 event;
+ u32 event_ctx;
+};
+
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type);
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc);
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc);
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
u16 admin_req_sz, u8 ignore_reset);
@@ -887,6 +1290,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
u64 sense_buf_dma);
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc);
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
struct mpi3_event_notification_reply *event_reply);
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
@@ -897,13 +1301,11 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
u32 reset_reason, u8 snapdump);
-int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
- u32 reset_reason);
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc);
-int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
u32 event_ctx);
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
@@ -911,6 +1313,88 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
-void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc);
-
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc);
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q);
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
+void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc);
+void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc);
+int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ u16 handle, uint lun, u16 htag, ulong timeout,
+ struct mpi3mr_drv_cmd *drv_cmd,
+ u8 *resp_code, struct scsi_cmnd *scmd);
+struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+ u16 event_data_size);
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle);
+extern const struct attribute_group *mpi3mr_host_groups[];
+extern const struct attribute_group *mpi3mr_dev_groups[];
+
+extern struct sas_function_template mpi3mr_transport_functions;
+extern struct scsi_transport_template *mpi3mr_transport_template;
+
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec);
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz);
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz);
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz);
+
+u8 mpi3mr_is_expander_device(u16 device_info);
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle);
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port);
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle);
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id);
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc);
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port);
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev);
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy);
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add);
+void mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc);
+void mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc);
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
#endif /*MPI3MR_H_INCLUDED*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
new file mode 100644
index 000000000000..9baac224b213
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -0,0 +1,1860 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+#include <linux/bsg-lib.h>
+#include <uapi/scsi/scsi_bsg_mpi3mr.h>
+
+/**
+ * mpi3mr_bsg_pel_abort - sends PEL abort request
+ * @mrioc: Adapter instance reference
+ *
+ * This function sends PEL abort request to the firmware through
+ * admin request queue.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_pel_req_action_abort pel_abort_req;
+ struct mpi3_pel_reply *pel_reply;
+ int retval = 0;
+ u16 pe_log_status;
+
+ if (mrioc->reset_in_progress) {
+ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+ return -1;
+ }
+ if (mrioc->stop_bsgs) {
+ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+ return -1;
+ }
+
+ memset(&pel_abort_req, 0, sizeof(pel_abort_req));
+ mutex_lock(&mrioc->pel_abort_cmd.mutex);
+ if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
+ dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+ mutex_unlock(&mrioc->pel_abort_cmd.mutex);
+ return -1;
+ }
+ mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
+ mrioc->pel_abort_cmd.is_waiting = 1;
+ mrioc->pel_abort_cmd.callback = NULL;
+ pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
+ pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
+ pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+
+ mrioc->pel_abort_requested = 1;
+ init_completion(&mrioc->pel_abort_cmd.done);
+ retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
+ sizeof(pel_abort_req), 0);
+ if (retval) {
+ retval = -1;
+ dprint_bsg_err(mrioc, "%s: admin request post failed\n",
+ __func__);
+ mrioc->pel_abort_requested = 0;
+ goto out_unlock;
+ }
+
+ wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
+ mrioc->pel_abort_cmd.is_waiting = 0;
+ dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
+ if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__, (mrioc->pel_abort_cmd.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->pel_abort_cmd.ioc_loginfo);
+ retval = -1;
+ goto out_unlock;
+ }
+ if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
+ pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
+ pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
+ if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "%s: command failed, pel_status(0x%04x)\n",
+ __func__, pe_log_status);
+ retval = -1;
+ }
+ }
+
+out_unlock:
+ mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->pel_abort_cmd.mutex);
+ return retval;
+}
+
+/**
+ * mpi3mr_bsg_verify_adapter - verify adapter number is valid
+ * @ioc_number: Adapter number
+ *
+ * This function returns the adapter instance pointer for the
+ * given adapter number. If the adapter number does not match any
+ * adapter in the driver's adapter list, NULL is returned.
+ *
+ * Return: adapter instance reference
+ */
+static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
+{
+ struct mpi3mr_ioc *mrioc = NULL;
+
+ spin_lock(&mrioc_list_lock);
+ list_for_each_entry(mrioc, &mrioc_list, list) {
+ if (mrioc->id == ioc_number) {
+ spin_unlock(&mrioc_list_lock);
+ return mrioc;
+ }
+ }
+ spin_unlock(&mrioc_list_lock);
+ return NULL;
+}
+
+/**
+ * mpi3mr_enable_logdata - Handler for log data enable
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function enables log data caching in the driver if it is
+ * not already enabled and returns the maximum number of log data
+ * entries that can be cached in the driver.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ struct mpi3mr_logdata_enable logdata_enable;
+
+ if (!mrioc->logdata_buf) {
+ mrioc->logdata_entry_sz =
+ (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
+ + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
+ mrioc->logdata_buf_idx = 0;
+ mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
+ mrioc->logdata_entry_sz, GFP_KERNEL);
+
+ if (!mrioc->logdata_buf)
+ return -ENOMEM;
+ }
+
+ memset(&logdata_enable, 0, sizeof(logdata_enable));
+ logdata_enable.max_entries =
+ MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
+ if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &logdata_enable, sizeof(logdata_enable));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_get_logdata - Handler for get log data
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function copies the log data entries to the user buffer
+ * when log caching is enabled in the driver.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;
+
+ if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
+ return -EINVAL;
+
+ num_entries = job->request_payload.payload_len / entry_sz;
+ if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
+ num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
+ sz = num_entries * entry_sz;
+
+ if (job->request_payload.payload_len >= sz) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ mrioc->logdata_buf, sz);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
+ * @mrioc: Adapter instance reference
+ * @job: BSG job pointer
+ *
+ * This function is the handler for the PEL enable request. It
+ * validates the application given class and locale and, if
+ * required, aborts the existing PEL wait request and/or issues
+ * a new PEL wait request to the firmware and returns.
+ *
+ * Return: 0 on success and proper error codes on failure.
+ */
+static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ struct mpi3mr_bsg_out_pel_enable pel_enable;
+ u8 issue_pel_wait;
+ u8 tmp_class;
+ u16 tmp_locale;
+
+ if (job->request_payload.payload_len != sizeof(pel_enable)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ return rval;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &pel_enable, sizeof(pel_enable));
+
+ if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
+ dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
+ __func__, pel_enable.pel_class);
+ rval = 0;
+ goto out;
+ }
+ if (!mrioc->pel_enabled)
+ issue_pel_wait = 1;
+ else {
+ if ((mrioc->pel_class <= pel_enable.pel_class) &&
+ !((mrioc->pel_locale & pel_enable.pel_locale) ^
+ pel_enable.pel_locale)) {
+ issue_pel_wait = 0;
+ rval = 0;
+ } else {
+ pel_enable.pel_locale |= mrioc->pel_locale;
+
+ if (mrioc->pel_class < pel_enable.pel_class)
+ pel_enable.pel_class = mrioc->pel_class;
+
+ rval = mpi3mr_bsg_pel_abort(mrioc);
+ if (rval) {
+ dprint_bsg_err(mrioc,
+ "%s: pel_abort failed, status(%ld)\n",
+ __func__, rval);
+ goto out;
+ }
+ issue_pel_wait = 1;
+ }
+ }
+ if (issue_pel_wait) {
+ tmp_class = mrioc->pel_class;
+ tmp_locale = mrioc->pel_locale;
+ mrioc->pel_class = pel_enable.pel_class;
+ mrioc->pel_locale = pel_enable.pel_locale;
+ mrioc->pel_enabled = 1;
+ rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
+ if (rval) {
+ mrioc->pel_class = tmp_class;
+ mrioc->pel_locale = tmp_locale;
+ mrioc->pel_enabled = 0;
+ dprint_bsg_err(mrioc,
+ "%s: pel get sequence number failed, status(%ld)\n",
+ __func__, rval);
+ }
+ }
+
+out:
+ return rval;
+}
+
+/**
+ * mpi3mr_get_all_tgt_info - Get all target information
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the device handle, persistent ID, bus ID
+ * and target ID of the driver managed target devices to the user
+ * provided buffer for the specific controller. This function
+ * also provides the number of devices managed by the driver for
+ * the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ u16 num_devices = 0, i = 0, size;
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_device_map_info *devmap_info = NULL;
+ struct mpi3mr_all_tgt_info *alltgt_info = NULL;
+ uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;
+
+ if (job->request_payload.payload_len < sizeof(u32)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ return rval;
+ }
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ num_devices++;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ if ((job->request_payload.payload_len == sizeof(u32)) ||
+ list_empty(&mrioc->tgtdev_list)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &num_devices, sizeof(num_devices));
+ return 0;
+ }
+
+ kern_entrylen = (num_devices - 1) * sizeof(*devmap_info);
+ size = sizeof(*alltgt_info) + kern_entrylen;
+ alltgt_info = kzalloc(size, GFP_KERNEL);
+ if (!alltgt_info)
+ return -ENOMEM;
+
+ devmap_info = alltgt_info->dmi;
+ memset((u8 *)devmap_info, 0xFF, (kern_entrylen + sizeof(*devmap_info)));
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (i < num_devices) {
+ devmap_info[i].handle = tgtdev->dev_handle;
+ devmap_info[i].perst_id = tgtdev->perst_id;
+ if (tgtdev->host_exposed && tgtdev->starget) {
+ devmap_info[i].target_id = tgtdev->starget->id;
+ devmap_info[i].bus_id =
+ tgtdev->starget->channel;
+ }
+ i++;
+ }
+ }
+ num_devices = i;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ memcpy(&alltgt_info->num_devices, &num_devices, sizeof(num_devices));
+
+ usr_entrylen = (job->request_payload.payload_len - sizeof(u32)) / sizeof(*devmap_info);
+ usr_entrylen *= sizeof(*devmap_info);
+ min_entrylen = min(usr_entrylen, kern_entrylen);
+ if (min_entrylen && (!memcpy(&alltgt_info->dmi, devmap_info, min_entrylen))) {
+ dprint_bsg_err(mrioc, "%s:%d: device map info copy failed\n",
+ __func__, __LINE__);
+ rval = -EFAULT;
+ goto out;
+ }
+
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ alltgt_info, job->request_payload.payload_len);
+ rval = 0;
+out:
+ kfree(alltgt_info);
+ return rval;
+}
+
+/**
+ * mpi3mr_get_change_count - Get topology change count
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function copies the topology change count provided by the
+ * driver in events and cached in the driver to the user
+ * provided buffer for the specific controller.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ struct mpi3mr_change_count chgcnt;
+
+ memset(&chgcnt, 0, sizeof(chgcnt));
+ chgcnt.change_count = mrioc->change_count;
+ if (job->request_payload.payload_len >= sizeof(chgcnt)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &chgcnt, sizeof(chgcnt));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_adp_reset - Issue controller reset
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function identifies the user provided reset type, issues
+ * the appropriate reset to the controller, waits for the reset
+ * to complete and reinitializes the controller before returning.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ u8 save_snapdump;
+ struct mpi3mr_bsg_adp_reset adpreset;
+
+ if (job->request_payload.payload_len !=
+ sizeof(adpreset)) {
+ dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ __func__);
+ goto out;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &adpreset, sizeof(adpreset));
+
+ switch (adpreset.reset_type) {
+ case MPI3MR_BSG_ADPRESET_SOFT:
+ save_snapdump = 0;
+ break;
+ case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
+ save_snapdump = 1;
+ break;
+ default:
+ dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
+ __func__, adpreset.reset_type);
+ goto out;
+ }
+
+ rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
+ save_snapdump);
+
+ if (rval)
+ dprint_bsg_err(mrioc,
+ "%s: reset handler returned error(%ld) for reset type %d\n",
+ __func__, rval, adpreset.reset_type);
+out:
+ return rval;
+}
+
+/**
+ * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
+ * @mrioc: Adapter instance reference
+ * @job: BSG job reference
+ *
+ * This function provides adapter information for the given
+ * controller
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
+ struct bsg_job *job)
+{
+ enum mpi3mr_iocstate ioc_state;
+ struct mpi3mr_bsg_in_adpinfo adpinfo;
+
+ memset(&adpinfo, 0, sizeof(adpinfo));
+ adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
+ adpinfo.pci_dev_id = mrioc->pdev->device;
+ adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
+ adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
+ adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
+ adpinfo.pci_bus = mrioc->pdev->bus->number;
+ adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
+ adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
+ adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
+ adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+ else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+ else if (ioc_state == MRIOC_STATE_FAULT)
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+ else
+ adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+ memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
+ sizeof(adpinfo.driver_info));
+
+ if (job->request_payload.payload_len >= sizeof(adpinfo)) {
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ &adpinfo, sizeof(adpinfo));
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * mpi3mr_bsg_process_drv_cmds - Driver Command handler
+ * @job: BSG job reference
+ *
+ * This function is the top level handler for driver commands;
+ * it does basic validation of the buffer, identifies the
+ * opcode and switches to the correct sub handler.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ struct mpi3mr_ioc *mrioc = NULL;
+ struct mpi3mr_bsg_packet *bsg_req = NULL;
+ struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;
+
+ bsg_req = job->request;
+ drvrcmd = &bsg_req->cmd.drvrcmd;
+
+ mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
+ if (!mrioc)
+ return -ENODEV;
+
+ if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
+ rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
+ return rval;
+ }
+
+ if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
+ return -ERESTARTSYS;
+
+ switch (drvrcmd->opcode) {
+ case MPI3MR_DRVBSG_OPCODE_ADPRESET:
+ rval = mpi3mr_bsg_adp_reset(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
+ rval = mpi3mr_get_all_tgt_info(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
+ rval = mpi3mr_get_change_count(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
+ rval = mpi3mr_enable_logdata(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
+ rval = mpi3mr_get_logdata(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_PELENABLE:
+ rval = mpi3mr_bsg_pel_enable(mrioc, job);
+ break;
+ case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
+ default:
+ pr_err("%s: unsupported driver command opcode %d\n",
+ MPI3MR_DRIVER_NAME, drvrcmd->opcode);
+ break;
+ }
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ return rval;
+}
+
+/**
+ * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
+ * @mpi_req: MPI request
+ * @sgl_offset: offset to start sgl in the MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ * @is_rmc: Does the buffer list have a management command buffer
+ * @is_rmr: Does the buffer list have a management response buffer
+ * @num_datasges: Number of data buffers in the list
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given MPI request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc,
+ u8 is_rmr, u8 num_datasges)
+{
+ u8 *sgl = (mpi_req + sgl_offset), count = 0;
+ struct mpi3_mgmt_passthrough_request *rmgmt_req =
+ (struct mpi3_mgmt_passthrough_request *)mpi_req;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u8 sgl_flags, sgl_flags_last;
+
+ sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
+ MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER;
+ sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST;
+
+ if (is_rmc) {
+ mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
+ sgl_flags_last, drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf_dma);
+ sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len;
+ drv_buf_iter++;
+ count++;
+ if (is_rmr) {
+ mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
+ sgl_flags_last, drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf_dma);
+ drv_buf_iter++;
+ count++;
+ } else
+ mpi3mr_build_zero_len_sge(
+ &rmgmt_req->response_sgl);
+ }
+ if (!num_datasges) {
+ mpi3mr_build_zero_len_sge(sgl);
+ return;
+ }
+ for (; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ if (num_datasges == 1 || !is_rmc)
+ mpi3mr_add_sg_single(sgl, sgl_flags_last,
+ drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
+ else
+ mpi3mr_add_sg_single(sgl, sgl_flags,
+ drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma);
+ sgl += sizeof(struct mpi3_sge_common);
+ num_datasges--;
+ }
+}
+
+/**
+ * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ *
+ * This function returns the type of the data format specified
+ * in user provided NVMe command in NVMe encapsulated request.
+ *
+ * Return: Data format of the NVMe command (PRP/SGL etc)
+ */
+static unsigned int mpi3mr_get_nvme_data_fmt(
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request)
+{
+ u8 format = 0;
+
+ format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
+ return format;
+}
+
+/**
+ * mpi3mr_build_nvme_sgl - SGL constructor for NVME
+ * encapsulated request
+ * @mrioc: Adapter instance reference
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in sgl
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as SGEs in the given NVMe encapsulated request.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
+{
+ struct mpi3mr_nvme_pt_sge *nvme_sgl;
+ u64 sgl_ptr;
+ u8 count;
+ size_t length = 0;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
+ mrioc->facts.sge_mod_shift) << 32);
+ u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
+ mrioc->facts.sge_mod_shift) << 32;
+
+ /*
+ * Not all commands require a data transfer. If no data, just return
+ * without constructing any sgl.
+ */
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ sgl_ptr = (u64)drv_buf_iter->kern_buf_dma;
+ length = drv_buf_iter->kern_buf_len;
+ break;
+ }
+ if (!length)
+ return 0;
+
+ if (sgl_ptr & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: SGL address collides with SGE modifier\n",
+ __func__);
+ return -1;
+ }
+
+ sgl_ptr &= ~sgemod_mask;
+ sgl_ptr |= sgemod_val;
+ nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
+ ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);
+ memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
+ nvme_sgl->base_addr = sgl_ptr;
+ nvme_sgl->length = length;
+ return 0;
+}
+
+/**
+ * mpi3mr_build_nvme_prp - PRP constructor for NVME
+ * encapsulated request
+ * @mrioc: Adapter instance reference
+ * @nvme_encap_request: NVMe encapsulated MPI request
+ * @drv_bufs: DMA address of the buffers to be placed in SGL
+ * @bufcnt: Number of DMA buffers
+ *
+ * This function places the DMA address of the given buffers in
+ * proper format as PRP entries in the given NVMe encapsulated
+ * request.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
+ struct mpi3_nvme_encapsulated_request *nvme_encap_request,
+ struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
+{
+ int prp_size = MPI3MR_NVME_PRP_SIZE;
+ __le64 *prp_entry, *prp1_entry, *prp2_entry;
+ __le64 *prp_page;
+ dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
+ u32 offset, entry_len, dev_pgsz;
+ u32 page_mask_result, page_mask;
+ size_t length = 0;
+ u8 count;
+ struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
+ u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
+ mrioc->facts.sge_mod_shift) << 32);
+ u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
+ mrioc->facts.sge_mod_shift) << 32;
+ u16 dev_handle = nvme_encap_request->dev_handle;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (!tgtdev) {
+ dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
+ __func__, dev_handle);
+ return -1;
+ }
+
+ if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
+ dprint_bsg_err(mrioc,
+ "%s: NVMe device page size is zero for handle 0x%04x\n",
+ __func__, dev_handle);
+ mpi3mr_tgtdev_put(tgtdev);
+ return -1;
+ }
+
+ dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
+ mpi3mr_tgtdev_put(tgtdev);
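+
+	/*
+	 * For illustration, assuming pcie_inf.pgsz carries the log2 of
+	 * the NVMe device page size: pgsz = 12 yields dev_pgsz =
+	 * 1 << 12 = 4096 bytes, the common 4 KiB device page.
+	 */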
+
+ /*
+ * Not all commands require a data transfer. If no data, just return
+ * without constructing any PRP.
+ */
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ dma_addr = drv_buf_iter->kern_buf_dma;
+ length = drv_buf_iter->kern_buf_len;
+ break;
+ }
+
+ if (!length)
+ return 0;
+
+ mrioc->prp_sz = 0;
+ mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);
+
+ if (!mrioc->prp_list_virt)
+ return -1;
+ mrioc->prp_sz = dev_pgsz;
+
+ /*
+ * Set pointers to PRP1 and PRP2, which are in the NVMe command.
+ * PRP1 is located at a 24 byte offset from the start of the NVMe
+ * command. Then set the current PRP entry pointer to PRP1.
+ */
+ prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
+ MPI3MR_NVME_CMD_PRP1_OFFSET);
+ prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
+ MPI3MR_NVME_CMD_PRP2_OFFSET);
+ prp_entry = prp1_entry;
+ /*
+ * For the PRP entries, use the specially allocated buffer of
+ * contiguous memory.
+ */
+ prp_page = (__le64 *)mrioc->prp_list_virt;
+ prp_page_dma = mrioc->prp_list_dma;
+
+ /*
+ * Check if we are within 1 entry of a page boundary; we don't
+ * want our first entry to be a PRP List entry.
+ */
+ page_mask = dev_pgsz - 1;
+ page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
+ if (!page_mask_result) {
+ dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
+ __func__);
+ goto err_out;
+ }
+
+ /*
+ * Set PRP physical pointer, which initially points to the current PRP
+ * DMA memory page.
+ */
+ prp_entry_dma = prp_page_dma;
+
+ /* Loop while the length is not zero. */
+ while (length) {
+ page_mask_result = (prp_entry_dma + prp_size) & page_mask;
+ if (!page_mask_result && (length > dev_pgsz)) {
+ dprint_bsg_err(mrioc,
+ "%s: single PRP page is not sufficient\n",
+ __func__);
+ goto err_out;
+ }
+
+ /* Need to handle if entry will be part of a page. */
+ offset = dma_addr & page_mask;
+ entry_len = dev_pgsz - offset;
+
+ if (prp_entry == prp1_entry) {
+ /*
+ * Must fill in the first PRP pointer (PRP1) before
+ * moving on.
+ */
+ *prp1_entry = cpu_to_le64(dma_addr);
+ if (*prp1_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP1 address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp1_entry &= ~sgemod_mask;
+ *prp1_entry |= sgemod_val;
+
+ /*
+ * Now point to the second PRP entry within the
+ * command (PRP2).
+ */
+ prp_entry = prp2_entry;
+ } else if (prp_entry == prp2_entry) {
+ /*
+ * Should the PRP2 entry be a PRP List pointer or just
+ * a regular PRP pointer? If there is more than one
+ * more page of data, must use a PRP List pointer.
+ */
+ if (length > dev_pgsz) {
+ /*
+ * PRP2 will contain a PRP List pointer because
+ * more PRPs are needed with this command. The
+ * list will start at the beginning of the
+ * contiguous buffer.
+ */
+ *prp2_entry = cpu_to_le64(prp_entry_dma);
+ if (*prp2_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP list address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp2_entry &= ~sgemod_mask;
+ *prp2_entry |= sgemod_val;
+
+ /*
+ * The next PRP Entry will be the start of the
+ * first PRP List.
+ */
+ prp_entry = prp_page;
+ continue;
+ } else {
+ /*
+ * After this, the PRP Entries are complete.
+ * This command uses 2 PRPs and no PRP list.
+ */
+ *prp2_entry = cpu_to_le64(dma_addr);
+ if (*prp2_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP2 collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp2_entry &= ~sgemod_mask;
+ *prp2_entry |= sgemod_val;
+ }
+ } else {
+ /*
+ * Put entry in list and bump the addresses.
+ *
+ * After PRP1 and PRP2 are filled in, this will fill in
+ * all remaining PRP entries in a PRP List, one per
+ * each time through the loop.
+ */
+ *prp_entry = cpu_to_le64(dma_addr);
+ if (*prp_entry & sgemod_mask) {
+ dprint_bsg_err(mrioc,
+ "%s: PRP address collides with SGE modifier\n",
+ __func__);
+ goto err_out;
+ }
+ *prp_entry &= ~sgemod_mask;
+ *prp_entry |= sgemod_val;
+ prp_entry++;
+ prp_entry_dma += prp_size;
+ }
+
+ /*
+ * Bump the phys address of the command's data buffer by the
+ * entry_len.
+ */
+ dma_addr += entry_len;
+
+ /* Decrement length, accounting for the last partial page. */
+ if (entry_len > length)
+ length = 0;
+ else
+ length -= entry_len;
+ }
+ return 0;
+err_out:
+ if (mrioc->prp_list_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
+ mrioc->prp_list_virt, mrioc->prp_list_dma);
+ mrioc->prp_list_virt = NULL;
+ }
+ return -1;
+}
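+
+/*
+ * A sketch of the layout mpi3mr_build_nvme_prp() produces for a
+ * transfer spanning more than two device pages (page count and size
+ * illustrative only):
+ *
+ *	PRP1 -> first data page (may begin at a non-zero page offset)
+ *	PRP2 -> PRP list page (mrioc->prp_list_dma)
+ *		list[0] -> second data page
+ *		list[1] -> third data page
+ *		...
+ *
+ * When the transfer fits in at most two device pages, PRP2 instead
+ * points directly at the second data page and no list entries are
+ * consumed from the allocated page.
+ */
+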
+/**
+ * mpi3mr_bsg_process_mpt_cmds - MPI pass-through BSG handler
+ * @job: BSG job reference
+ * @reply_payload_rcv_len: length of the data copied back to the
+ * BSG reply payload, filled in on completion
+ *
+ * This function is the top level handler for MPI pass-through
+ * commands. It does basic validation of the input data buffers,
+ * identifies the given buffer types and the MPI command, allocates
+ * DMAable memory for the user given buffers, constructs the SGL
+ * properly and passes the command to the firmware.
+ *
+ * Once the MPI command is completed, the driver copies the data,
+ * if any, along with the reply and sense information to the user
+ * provided buffers. If the command times out, a controller reset
+ * is issued prior to returning.
+ *
+ * Return: 0 on success and proper error codes on failure
+ */
+static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len)
+{
+ long rval = -EINVAL;
+
+ struct mpi3mr_ioc *mrioc = NULL;
+ u8 *mpi_req = NULL, *sense_buff_k = NULL;
+ u8 mpi_msg_size = 0;
+ struct mpi3mr_bsg_packet *bsg_req = NULL;
+ struct mpi3mr_bsg_mptcmd *karg;
+ struct mpi3mr_buf_entry *buf_entries = NULL;
+ struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
+ u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0;
+ u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0;
+ u8 block_io = 0, resp_code = 0, nvme_fmt = 0;
+ struct mpi3_request_header *mpi_header = NULL;
+ struct mpi3_status_reply_descriptor *status_desc;
+ struct mpi3_scsi_task_mgmt_request *tm_req;
+ u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
+ u16 dev_handle;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
+ struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
+ u32 din_size = 0, dout_size = 0;
+ u8 *din_buf = NULL, *dout_buf = NULL;
+ u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
+
+ bsg_req = job->request;
+ karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;
+
+ mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
+ if (!mrioc)
+ return -ENODEV;
+
+ if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
+ karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;
+
+ mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
+ if (!mpi_req)
+ return -ENOMEM;
+ mpi_header = (struct mpi3_request_header *)mpi_req;
+
+ bufcnt = karg->buf_entry_list.num_of_entries;
+ drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
+ if (!drv_bufs) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ dout_buf = kzalloc(job->request_payload.payload_len,
+ GFP_KERNEL);
+ if (!dout_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ din_buf = kzalloc(job->reply_payload.payload_len,
+ GFP_KERNEL);
+ if (!din_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ dout_buf, job->request_payload.payload_len);
+
+ buf_entries = karg->buf_entry_list.buf_entry;
+ sgl_din_iter = din_buf;
+ sgl_dout_iter = dout_buf;
+ drv_buf_iter = drv_bufs;
+
+ for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {
+ if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
+ dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+ if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
+ dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ switch (buf_entries->buf_type) {
+ case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_TO_DEVICE;
+ is_rmcb = 1;
+ if (count != 0)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_FROM_DEVICE;
+ is_rmrb = 1;
+ if (count != 1 || !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_DATA_IN:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_FROM_DEVICE;
+ din_cnt++;
+ din_size += drv_buf_iter->bsg_buf_len;
+ if ((din_cnt > 1) && !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_DATA_OUT:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_TO_DEVICE;
+ dout_cnt++;
+ dout_size += drv_buf_iter->bsg_buf_len;
+ if ((dout_cnt > 1) && !is_rmcb)
+ invalid_be = 1;
+ break;
+ case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ mpirep_offset = count;
+ break;
+ case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
+ sgl_iter = sgl_din_iter;
+ sgl_din_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ erb_offset = count;
+ break;
+ case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
+ sgl_iter = sgl_dout_iter;
+ sgl_dout_iter += buf_entries->buf_len;
+ drv_buf_iter->data_dir = DMA_NONE;
+ mpi_msg_size = buf_entries->buf_len;
+ if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
+ (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
+ dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+ memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
+ break;
+ default:
+ invalid_be = 1;
+ break;
+ }
+ if (invalid_be) {
+ dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
+ __func__);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ drv_buf_iter->bsg_buf = sgl_iter;
+ drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
+ }
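+
+	/*
+	 * An example of a well-formed entry list for an MPI SCSI IO read
+	 * issued by a management application (ordering assumed here for
+	 * illustration): entry 0 MPI3MR_BSG_BUFTYPE_MPI_REQUEST, entry 1
+	 * MPI3MR_BSG_BUFTYPE_DATA_IN, entry 2 MPI3MR_BSG_BUFTYPE_MPI_REPLY
+	 * and entry 3 MPI3MR_BSG_BUFTYPE_ERR_RESPONSE. RAIDMGMT_CMD, when
+	 * present, must be entry 0 and RAIDMGMT_RESP entry 1, as enforced
+	 * by the invalid_be checks above.
+	 */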
+ if (!is_rmcb && (dout_cnt || din_cnt)) {
+ sg_entries = dout_cnt + din_cnt;
+ if (((mpi_msg_size) + (sg_entries *
+ sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid message size passed\n",
+ __func__, __LINE__);
+ rval = -EINVAL;
+ goto out;
+ }
+ }
+ if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
+ __func__, __LINE__, mpi_header->function, din_size);
+ rval = -EINVAL;
+ goto out;
+ }
+ if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
+ dprint_bsg_err(mrioc,
+ "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
+ __func__, __LINE__, mpi_header->function, dout_size);
+ rval = -EINVAL;
+ goto out;
+ }
+
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+
+ drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
+ if (is_rmcb && !count)
+ drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) *
+ sizeof(struct mpi3_sge_common));
+
+ if (!drv_buf_iter->kern_buf_len)
+ continue;
+
+ drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev,
+ drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma,
+ GFP_KERNEL);
+ if (!drv_buf_iter->kern_buf) {
+ rval = -ENOMEM;
+ goto out;
+ }
+ if (drv_buf_iter->data_dir == DMA_TO_DEVICE) {
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
+ }
+ }
+
+ if (erb_offset != 0xFF) {
+ sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
+ if (!sense_buff_k) {
+ rval = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
+ rval = -ERESTARTSYS;
+ goto out;
+ }
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
+ rval = -EAGAIN;
+ dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->unrecoverable) {
+ dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
+ __func__);
+ rval = -EFAULT;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->reset_in_progress) {
+ dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
+ rval = -EAGAIN;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ if (mrioc->stop_bsgs) {
+ dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
+ rval = -EAGAIN;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+
+ if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
+ nvme_fmt = mpi3mr_get_nvme_data_fmt(
+ (struct mpi3_nvme_encapsulated_request *)mpi_req);
+ if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
+ if (mpi3mr_build_nvme_prp(mrioc,
+ (struct mpi3_nvme_encapsulated_request *)mpi_req,
+ drv_bufs, bufcnt)) {
+ rval = -ENOMEM;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
+ nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
+ if (mpi3mr_build_nvme_sgl(mrioc,
+ (struct mpi3_nvme_encapsulated_request *)mpi_req,
+ drv_bufs, bufcnt)) {
+ rval = -EINVAL;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else {
+ dprint_bsg_err(mrioc,
+ "%s:invalid NVMe command format\n", __func__);
+ rval = -EINVAL;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+ goto out;
+ }
+ } else {
+ mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size),
+ drv_bufs, bufcnt, is_rmcb, is_rmrb,
+ (dout_cnt + din_cnt));
+ }
+
+ if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
+ tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
+ if (tm_req->task_type !=
+ MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ dev_handle = tm_req->dev_handle;
+ block_io = 1;
+ }
+ }
+ if (block_io) {
+ tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
+ stgt_priv = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
+ atomic_inc(&stgt_priv->block_io);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+
+ mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->bsg_cmds.is_waiting = 1;
+ mrioc->bsg_cmds.callback = NULL;
+ mrioc->bsg_cmds.is_sense = 0;
+ mrioc->bsg_cmds.sensebuf = sense_buff_k;
+ memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
+ mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
+ if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
+ dprint_bsg_info(mrioc,
+ "%s: posting bsg request to the controller\n", __func__);
+ dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+ "bsg_mpi3_req");
+ if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+ drv_buf_iter = &drv_bufs[0];
+ dprint_dump(drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+ }
+ }
+
+ init_completion(&mrioc->bsg_cmds.done);
+ rval = mpi3mr_admin_request_post(mrioc, mpi_req,
+ MPI3MR_ADMIN_REQ_FRAME_SZ, 0);
+
+ if (rval) {
+ mrioc->bsg_cmds.is_waiting = 0;
+ dprint_bsg_err(mrioc,
+ "%s: posting bsg request is failed\n", __func__);
+ rval = -EAGAIN;
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->bsg_cmds.done,
+ (karg->timeout * HZ));
+ if (block_io && stgt_priv)
+ atomic_dec(&stgt_priv->block_io);
+ if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mrioc->bsg_cmds.is_waiting = 0;
+ rval = -EAGAIN;
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
+ goto out_unlock;
+ dprint_bsg_err(mrioc,
+ "%s: bsg request timedout after %d seconds\n", __func__,
+ karg->timeout);
+ if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) {
+ dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
+ "bsg_mpi3_req");
+ if (mpi_header->function ==
+ MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
+ drv_buf_iter = &drv_bufs[0];
+ dprint_dump(drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_len, "mpi3_mgmt_req");
+ }
+ }
+
+ if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
+ (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
+ mpi3mr_issue_tm(mrioc,
+ MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ mpi_header->function_dependent, 0,
+ MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
+ &mrioc->host_tm_cmds, &resp_code, NULL);
+ if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
+ !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
+ goto out_unlock;
+ }
+ dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);
+
+ if (mrioc->prp_list_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
+ mrioc->prp_list_virt, mrioc->prp_list_dma);
+ mrioc->prp_list_virt = NULL;
+ }
+
+ if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_info(mrioc,
+ "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__,
+ (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ mrioc->bsg_cmds.ioc_loginfo);
+ }
+
+ if ((mpirep_offset != 0xFF) &&
+ drv_bufs[mpirep_offset].bsg_buf_len) {
+ drv_buf_iter = &drv_bufs[mpirep_offset];
+ drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
+ mrioc->reply_sz);
+ bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);
+
+ if (!bsg_reply_buf) {
+ rval = -ENOMEM;
+ goto out_unlock;
+ }
+ if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
+ bsg_reply_buf->mpi_reply_type =
+ MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
+ memcpy(bsg_reply_buf->reply_buf,
+ mrioc->bsg_cmds.reply, mrioc->reply_sz);
+ } else {
+ bsg_reply_buf->mpi_reply_type =
+ MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
+ status_desc = (struct mpi3_status_reply_descriptor *)
+ bsg_reply_buf->reply_buf;
+ status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
+ status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
+ }
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
+ }
+
+ if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
+ mrioc->bsg_cmds.is_sense) {
+ drv_buf_iter = &drv_bufs[erb_offset];
+ tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
+ }
+
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->data_dir == DMA_NONE)
+ continue;
+ if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
+ tmplen = min(drv_buf_iter->kern_buf_len,
+ drv_buf_iter->bsg_buf_len);
+ memcpy(drv_buf_iter->bsg_buf,
+ drv_buf_iter->kern_buf, tmplen);
+ }
+ }
+
+out_unlock:
+ if (din_buf) {
+ *reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ din_buf, job->reply_payload.payload_len);
+ }
+ mrioc->bsg_cmds.is_sense = 0;
+ mrioc->bsg_cmds.sensebuf = NULL;
+ mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
+out:
+ kfree(sense_buff_k);
+ kfree(dout_buf);
+ kfree(din_buf);
+ kfree(mpi_req);
+ if (drv_bufs) {
+ drv_buf_iter = drv_bufs;
+ for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
+ if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma)
+ dma_free_coherent(&mrioc->pdev->dev,
+ drv_buf_iter->kern_buf_len,
+ drv_buf_iter->kern_buf,
+ drv_buf_iter->kern_buf_dma);
+ }
+ kfree(drv_bufs);
+ }
+ kfree(bsg_reply_buf);
+ return rval;
+}
+
+/**
+ * mpi3mr_app_save_logdata - Save Log Data events
+ * @mrioc: Adapter instance reference
+ * @event_data: event data associated with log data event
+ * @event_data_size: event data size to copy
+ *
+ * If log data event caching is enabled by the applications,
+ * then this function saves the log data in the circular queue
+ * and sends the async signal SIGIO to the event monitoring
+ * applications to indicate there is an async event from the
+ * firmware.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
+ u16 event_data_size)
+{
+ u32 index = mrioc->logdata_buf_idx, sz;
+ struct mpi3mr_logdata_entry *entry;
+
+ if (!(mrioc->logdata_buf))
+ return;
+
+ entry = (struct mpi3mr_logdata_entry *)
+ (mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
+ entry->valid_entry = 1;
+ sz = min(mrioc->logdata_entry_sz, event_data_size);
+ memcpy(entry->data, event_data, sz);
+ mrioc->logdata_buf_idx =
+ ((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
+ atomic64_inc(&event_counter);
+}
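+
+/*
+ * A worked example of the circular update above, assuming
+ * MPI3MR_BSG_LOGDATA_MAX_ENTRIES is 128: with logdata_buf_idx at 127,
+ * the entry is written to the last slot and ((++index) % 128) wraps
+ * the index back to 0, so the next event overwrites the oldest cached
+ * entry.
+ */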
+
+/**
+ * mpi3mr_bsg_request - bsg request entry point
+ * @job: BSG job reference
+ *
+ * This is the driver's entry point for bsg requests.
+ *
+ * Return: 0 always; the command status is conveyed to the midlayer
+ * via bsg_job_done().
+ */
+static int mpi3mr_bsg_request(struct bsg_job *job)
+{
+ long rval = -EINVAL;
+ unsigned int reply_payload_rcv_len = 0;
+
+ struct mpi3mr_bsg_packet *bsg_req = job->request;
+
+ switch (bsg_req->cmd_type) {
+ case MPI3MR_DRV_CMD:
+ rval = mpi3mr_bsg_process_drv_cmds(job);
+ break;
+ case MPI3MR_MPT_CMD:
+ rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len);
+ break;
+ default:
+ pr_err("%s: unsupported BSG command(0x%08x)\n",
+ MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
+ break;
+ }
+
+ bsg_job_done(job, rval, reply_payload_rcv_len);
+
+ return 0;
+}
+
+/**
+ * mpi3mr_bsg_exit - de-registration from bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver unload and all
+ * bsg resources allocated during load will be freed.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
+{
+ struct device *bsg_dev = &mrioc->bsg_dev;
+ if (!mrioc->bsg_queue)
+ return;
+
+ bsg_remove_queue(mrioc->bsg_queue);
+ mrioc->bsg_queue = NULL;
+
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+}
+
+/**
+ * mpi3mr_bsg_node_release - release bsg device node
+ * @dev: bsg device node
+ *
+ * Decrements the reference count of the bsg device node's parent.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_bsg_node_release(struct device *dev)
+{
+ put_device(dev->parent);
+}
+
+/**
+ * mpi3mr_bsg_init - registration with bsg layer
+ * @mrioc: Adapter instance reference
+ *
+ * This will be called during driver load and it will
+ * register the driver with the bsg layer.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
+{
+ struct device *bsg_dev = &mrioc->bsg_dev;
+ struct device *parent = &mrioc->shost->shost_gendev;
+
+ device_initialize(bsg_dev);
+
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = mpi3mr_bsg_node_release;
+
+ dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);
+
+ if (device_add(bsg_dev)) {
+ ioc_err(mrioc, "%s: bsg device add failed\n",
+ dev_name(bsg_dev));
+ put_device(bsg_dev);
+ return;
+ }
+
+ mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
+ mpi3mr_bsg_request, NULL, 0);
+ if (IS_ERR(mrioc->bsg_queue)) {
+ ioc_err(mrioc, "%s: bsg registration failed\n",
+ dev_name(bsg_dev));
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+ return;
+ }
+
+ blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
+ blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
+}
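+
+/*
+ * From userspace, the node registered above is expected to surface as
+ * /dev/bsg/mpi3mrctl<id>. A management application would drive it via
+ * the generic bsg sg_io_v4 contract; a minimal sketch, with error
+ * handling elided and pkt assumed to be a populated struct
+ * mpi3mr_bsg_packet:
+ *
+ *	struct sg_io_v4 io = { .guard = 'Q',
+ *		.protocol = BSG_PROTOCOL_SCSI,
+ *		.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT };
+ *	int fd = open("/dev/bsg/mpi3mrctl0", O_RDWR);
+ *
+ *	io.request = (uintptr_t)&pkt;
+ *	io.request_len = sizeof(pkt);
+ *	ioctl(fd, SG_IO, &io);
+ */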
+
+/**
+ * version_fw_show - SysFS callback for firmware version read
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware version
+ */
+static ssize_t
+version_fw_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;
+
+ return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
+ fwver->gen_major, fwver->gen_minor, fwver->ph_major,
+ fwver->ph_minor, fwver->cust_id, fwver->build_num);
+}
+static DEVICE_ATTR_RO(version_fw);
+
+/**
+ * fw_queue_depth_show - SysFS callback for firmware max cmds
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware max commands
+ */
+static ssize_t
+fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
+}
+static DEVICE_ATTR_RO(fw_queue_depth);
+
+/**
+ * op_req_q_count_show - SysFS callback for request queue count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying request queue count
+ */
+static ssize_t
+op_req_q_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
+}
+static DEVICE_ATTR_RO(op_req_q_count);
+
+/**
+ * reply_queue_count_show - SysFS callback for reply queue count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying reply queue count
+ */
+static ssize_t
+reply_queue_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
+}
+
+static DEVICE_ATTR_RO(reply_queue_count);
+
+/**
+ * logging_level_show - Show controller debug level
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * A sysfs 'read/write' shost attribute, to show the current
+ * debug log level used by the driver for the specific
+ * controller.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+logging_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
+}
+
+/**
+ * logging_level_store - Change controller debug level
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ * @count: size of the buffer
+ *
+ * A sysfs 'read/write' shost attribute, to change the current
+ * debug log level used by the driver for the specific
+ * controller.
+ *
+ * Return: strlen() return
+ */
+static ssize_t
+logging_level_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ int val = 0;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ mrioc->logging_level = val;
+ ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(logging_level);
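+
+/*
+ * Usage sketch (the path follows the standard Scsi_Host sysfs layout):
+ * reading /sys/class/scsi_host/host<N>/logging_level returns the mask
+ * formatted as "%08xh", and writing any value accepted by kstrtoint(),
+ * e.g. "0x10000" for MPI3_DEBUG_BSG_INFO, enables the corresponding
+ * messages for this controller alone.
+ */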
+
+/**
+ * adp_state_show - SysFS callback for adapter state display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying adapter state
+ */
+static ssize_t
+adp_state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ enum mpi3mr_iocstate ioc_state;
+ uint8_t adp_state;
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
+ adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
+ else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
+ adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
+ else if (ioc_state == MRIOC_STATE_FAULT)
+ adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
+ else
+ adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;
+
+ return sysfs_emit(buf, "%u\n", adp_state);
+}
+
+static DEVICE_ATTR_RO(adp_state);
+
+static struct attribute *mpi3mr_host_attrs[] = {
+ &dev_attr_version_fw.attr,
+ &dev_attr_fw_queue_depth.attr,
+ &dev_attr_op_req_q_count.attr,
+ &dev_attr_reply_queue_count.attr,
+ &dev_attr_logging_level.attr,
+ &dev_attr_adp_state.attr,
+ NULL,
+};
+
+static const struct attribute_group mpi3mr_host_attr_group = {
+ .attrs = mpi3mr_host_attrs
+};
+
+const struct attribute_group *mpi3mr_host_groups[] = {
+ &mpi3mr_host_attr_group,
+ NULL,
+};
+
+
+/*
+ * SCSI Device attributes under sysfs
+ */
+
+/**
+ * sas_address_show - SysFS callback for device SAS address display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying SAS address of the
+ * specific SAS/SATA end device.
+ */
+static ssize_t
+sas_address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
+ return 0;
+ return sysfs_emit(buf, "0x%016llx\n",
+ (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
+}
+
+static DEVICE_ATTR_RO(sas_address);
+
+/**
+ * device_handle_show - SysFS callback for device handle display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying firmware internal
+ * device handle of the specific device.
+ */
+static ssize_t
+device_handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev)
+ return 0;
+ return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
+}
+
+static DEVICE_ATTR_RO(device_handle);
+
+/**
+ * persistent_id_show - SysFS callback for persistent ID display
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Return: sysfs_emit() return after copying the persistent ID of
+ * the specific device.
+ */
+static ssize_t
+persistent_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct mpi3mr_stgt_priv_data *tgt_priv_data;
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ sdev_priv_data = sdev->hostdata;
+ if (!sdev_priv_data)
+ return 0;
+
+ tgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (!tgt_priv_data)
+ return 0;
+ tgtdev = tgt_priv_data->tgt_dev;
+ if (!tgtdev)
+ return 0;
+ return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
+}
+static DEVICE_ATTR_RO(persistent_id);
+
+static struct attribute *mpi3mr_dev_attrs[] = {
+ &dev_attr_sas_address.attr,
+ &dev_attr_device_handle.attr,
+ &dev_attr_persistent_id.attr,
+ NULL,
+};
+
+static const struct attribute_group mpi3mr_dev_attr_group = {
+ .attrs = mpi3mr_dev_attrs
+};
+
+const struct attribute_group *mpi3mr_dev_groups[] = {
+ &mpi3mr_dev_attr_group,
+ NULL,
+};
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
index c085bb048d41..ee6edd8322e6 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_debug.h
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2021 Broadcom Inc.
+ * Copyright (C) 2017-2022 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
@@ -14,27 +14,24 @@
/*
* debug levels
*/
-#define MPI3_DEBUG 0x00000001
-#define MPI3_DEBUG_MSG_FRAME 0x00000002
-#define MPI3_DEBUG_SG 0x00000004
-#define MPI3_DEBUG_EVENTS 0x00000008
-#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000010
-#define MPI3_DEBUG_INIT 0x00000020
-#define MPI3_DEBUG_EXIT 0x00000040
-#define MPI3_DEBUG_FAIL 0x00000080
-#define MPI3_DEBUG_TM 0x00000100
-#define MPI3_DEBUG_REPLY 0x00000200
-#define MPI3_DEBUG_HANDSHAKE 0x00000400
-#define MPI3_DEBUG_CONFIG 0x00000800
-#define MPI3_DEBUG_DL 0x00001000
-#define MPI3_DEBUG_RESET 0x00002000
-#define MPI3_DEBUG_SCSI 0x00004000
-#define MPI3_DEBUG_IOCTL 0x00008000
-#define MPI3_DEBUG_CSMISAS 0x00010000
-#define MPI3_DEBUG_SAS 0x00020000
-#define MPI3_DEBUG_TRANSPORT 0x00040000
-#define MPI3_DEBUG_TASK_SET_FULL 0x00080000
-#define MPI3_DEBUG_TRIGGER_DIAG 0x00200000
+
+#define MPI3_DEBUG_EVENT 0x00000001
+#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000002
+#define MPI3_DEBUG_INIT 0x00000004
+#define MPI3_DEBUG_EXIT 0x00000008
+#define MPI3_DEBUG_TM 0x00000010
+#define MPI3_DEBUG_RESET 0x00000020
+#define MPI3_DEBUG_SCSI_ERROR 0x00000040
+#define MPI3_DEBUG_REPLY 0x00000080
+#define MPI3_DEBUG_CFG_ERROR 0x00000100
+#define MPI3_DEBUG_TRANSPORT_ERROR 0x00000200
+#define MPI3_DEBUG_BSG_ERROR 0x00008000
+#define MPI3_DEBUG_BSG_INFO 0x00010000
+#define MPI3_DEBUG_SCSI_INFO 0x00020000
+#define MPI3_DEBUG_CFG_INFO 0x00040000
+#define MPI3_DEBUG_TRANSPORT_INFO 0x00080000
+#define MPI3_DEBUG 0x01000000
+#define MPI3_DEBUG_SG 0x02000000
/*
@@ -50,11 +47,151 @@
#define ioc_info(ioc, fmt, ...) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define dprint(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_th(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_bh(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_init(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_INIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_exit(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EXIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_tm(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TM) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reply(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_REPLY) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reset(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_RESET) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_command(ioc, SCMD, LOG_LEVEL) \
+ do { \
+ if (ioc->logging_level & LOG_LEVEL) \
+ scsi_print_command(SCMD); \
+ } while (0)
+
-#define dbgprint(IOC, FMT, ...) \
+#define dprint_bsg_info(ioc, fmt, ...) \
do { \
- if (IOC->logging_level & MPI3_DEBUG) \
- pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
+ if (ioc->logging_level & MPI3_DEBUG_BSG_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_bsg_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_BSG_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_cfg_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_cfg_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_CFG_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+#define dprint_transport_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_transport_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TRANSPORT_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#endif /* MPT3SAS_DEBUG_H_INCLUDED */
+
+/**
+ * dprint_dump - print contents of a memory buffer
+ * @req: Pointer to a memory buffer
+ * @sz: Memory buffer size
+ * @name_string: name string to identify the buffer type
+ */
+static inline void
+dprint_dump(void *req, int sz, const char *name_string)
+{
+ int i;
+ __le32 *mfp = (__le32 *)req;
+
+ sz = sz / 4;
+ if (name_string)
+ pr_info("%s:\n\t", name_string);
+ else
+ pr_info("request:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+
+/**
+ * dprint_dump_req - print message frame contents
+ * @req: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+dprint_dump_req(void *req, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)req;
+
+ pr_info("request:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
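+
+/*
+ * Intended output shape for a 64-byte frame dumped via
+ * dprint_dump_req(req, 16), eight dwords per line (values
+ * illustrative):
+ *
+ *	request:
+ *		00000041 00000000 0000f001 00000000 00000000 00000000 00000000 00000000
+ *		...
+ *
+ * Both helpers decode each dword from little-endian firmware order via
+ * le32_to_cpu() before printing.
+ */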
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index aa5d877df6f8..0c4aabaefdcc 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2021 Broadcom Inc.
+ * Copyright (C) 2017-2022 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
@@ -10,6 +10,18 @@
#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>
+static int
+mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data);
+static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd);
+
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
+
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
@@ -78,6 +90,7 @@ static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
kfree(mrioc->intr_info);
mrioc->intr_info = NULL;
mrioc->intr_info_count = 0;
+ mrioc->is_intr_info_set = false;
pci_free_irq_vectors(mrioc->pdev);
}
@@ -124,8 +137,9 @@ static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
u64 reply_dma)
{
u32 old_idx = 0;
+ unsigned long flags;
- spin_lock(&mrioc->reply_free_queue_lock);
+ spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
old_idx = mrioc->reply_free_queue_host_index;
mrioc->reply_free_queue_host_index = (
(mrioc->reply_free_queue_host_index ==
@@ -134,15 +148,16 @@ static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
writel(mrioc->reply_free_queue_host_index,
&mrioc->sysif_regs->reply_free_host_index);
- spin_unlock(&mrioc->reply_free_queue_lock);
+ spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
u64 sense_buf_dma)
{
u32 old_idx = 0;
+ unsigned long flags;
- spin_lock(&mrioc->sbq_lock);
+ spin_lock_irqsave(&mrioc->sbq_lock, flags);
old_idx = mrioc->sbq_host_index;
mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
(mrioc->sense_buf_q_sz - 1)) ? 0 :
@@ -150,7 +165,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
writel(mrioc->sbq_host_index,
&mrioc->sysif_regs->sense_buffer_free_host_index);
- spin_unlock(&mrioc->sbq_lock);
+ spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
@@ -171,9 +186,6 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
case MPI3_EVENT_GPIO_INTERRUPT:
desc = "GPIO Interrupt";
break;
- case MPI3_EVENT_TEMP_THRESHOLD:
- desc = "Temperature Threshold";
- break;
case MPI3_EVENT_CABLE_MGMT:
desc = "Cable Management";
break;
@@ -232,6 +244,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
desc = "Enclosure Device Status Change";
break;
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ desc = "Enclosure Added";
+ break;
case MPI3_EVENT_HARD_RESET_RECEIVED:
desc = "Hard Reset Received";
break;
@@ -287,8 +302,18 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
switch (host_tag) {
case MPI3MR_HOSTTAG_INITCMDS:
return &mrioc->init_cmds;
+ case MPI3MR_HOSTTAG_CFG_CMDS:
+ return &mrioc->cfg_cmds;
+ case MPI3MR_HOSTTAG_BSG_CMDS:
+ return &mrioc->bsg_cmds;
case MPI3MR_HOSTTAG_BLK_TMS:
return &mrioc->host_tm_cmds;
+ case MPI3MR_HOSTTAG_PEL_ABORT:
+ return &mrioc->pel_abort_cmd;
+ case MPI3MR_HOSTTAG_PEL_WAIT:
+ return &mrioc->pel_cmds;
+ case MPI3MR_HOSTTAG_TRANSPORT_CMDS:
+ return &mrioc->transport_cmds;
case MPI3MR_HOSTTAG_INVALID:
if (def_reply && def_reply->function ==
MPI3_FUNCTION_EVENT_NOTIFICATION)
@@ -303,6 +328,12 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
return &mrioc->dev_rmhs_cmds[idx];
}
+ if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ return &mrioc->evtack_cmds[idx];
+ }
+
return NULL;
}
@@ -369,7 +400,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (def_reply) {
cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
- mrioc->facts.reply_sz);
+ mrioc->reply_sz);
}
if (cmdptr->is_waiting) {
complete(&cmdptr->done);
@@ -400,6 +431,9 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
return 0;
do {
+ if (mrioc->unrecoverable)
+ break;
+
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
mpi3mr_process_admin_reply_desc(mrioc, reply_desc, &reply_dma);
if (reply_dma)
@@ -446,10 +480,21 @@ mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
return reply_desc;
}
-static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_intr_info *intr_info)
+/**
+ * mpi3mr_process_op_reply_q - Operational reply queue handler
+ * @mrioc: Adapter instance reference
+ * @op_reply_q: Operational reply queue info
+ *
+ * Checks the specific operational reply queue, drains its reply
+ * queue entries until the queue is empty and processes the
+ * individual reply descriptors.
+ *
+ * Return: 0 if the queue is already processed, or the number of
+ * reply descriptors processed.
+ */
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q)
{
- struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
struct op_req_qinfo *op_req_q;
u32 exp_phase;
u32 reply_ci;
@@ -474,6 +519,9 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
+ if (mrioc->unrecoverable)
+ break;
+
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
op_req_q = &mrioc->req_qinfo[req_q_idx];
@@ -495,15 +543,16 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
if ((le16_to_cpu(reply_desc->reply_flags) &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+#ifndef CONFIG_PREEMPT_RT
/*
* Exit completion loop to avoid CPU lockup
* Ensure remaining completion happens from threaded ISR.
*/
if (num_op_reply > mrioc->max_host_ios) {
- intr_info->op_reply_q->enable_irq_poll = true;
+ op_reply_q->enable_irq_poll = true;
break;
}
-
+#endif
} while (1);
writel(reply_ci,
@@ -515,6 +564,35 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
return num_op_reply;
}
+/**
+ * mpi3mr_blk_mq_poll - Operational reply queue handler
+ * @shost: SCSI Host reference
+ * @queue_num: Request queue number (from the OS view, this is the hardware context number)
+ *
+ * Checks the specific operational reply queue, drains its reply
+ * queue entries until the queue is empty and processes the
+ * individual reply descriptors.
+ *
+ * Return: 0 if the queue is already processed, or the number of
+ * reply descriptors processed.
+ */
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ int num_entries = 0;
+ struct mpi3mr_ioc *mrioc;
+
+ mrioc = (struct mpi3mr_ioc *)shost->hostdata;
+
+ if ((mrioc->reset_in_progress || mrioc->prepare_for_reset ||
+ mrioc->unrecoverable))
+ return 0;
+
+ num_entries = mpi3mr_process_op_reply_q(mrioc,
+ &mrioc->op_reply_qinfo[queue_num]);
+
+ return num_entries;
+}
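+
+/*
+ * Wiring note, stated as an assumption from the standard SCSI midlayer
+ * contract: this routine is published as the host template's .mq_poll
+ * callback, so blk-mq calls it for HCTX_TYPE_POLL hardware contexts,
+ * e.g. io_uring submissions made with IORING_SETUP_IOPOLL, matching
+ * the "poll_queues" module parameter above.
+ */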
+
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
struct mpi3mr_intr_info *intr_info = privdata;
@@ -535,7 +613,8 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
if (!midx)
num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
if (intr_info->op_reply_q)
- num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
if (num_admin_replies || num_op_reply)
return IRQ_HANDLED;
@@ -543,18 +622,16 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
return IRQ_NONE;
}
+#ifndef CONFIG_PREEMPT_RT
+
static irqreturn_t mpi3mr_isr(int irq, void *privdata)
{
struct mpi3mr_intr_info *intr_info = privdata;
- struct mpi3mr_ioc *mrioc;
- u16 midx;
int ret;
if (!intr_info)
return IRQ_NONE;
- mrioc = intr_info->mrioc;
- midx = intr_info->msix_index;
/* Call primary ISR routine */
ret = mpi3mr_isr_primary(irq, privdata);
@@ -569,7 +646,7 @@ static irqreturn_t mpi3mr_isr(int irq, void *privdata)
!atomic_read(&intr_info->op_reply_q->pend_ios))
return ret;
- disable_irq_nosync(pci_irq_vector(mrioc->pdev, midx));
+ disable_irq_nosync(intr_info->os_irq);
return IRQ_WAKE_THREAD;
}
@@ -599,26 +676,29 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
/* Poll for pending IOs completions */
do {
- if (!mrioc->intr_enabled)
+ if (!mrioc->intr_enabled || mrioc->unrecoverable)
break;
if (!midx)
mpi3mr_process_admin_reply_q(mrioc);
if (intr_info->op_reply_q)
num_op_reply +=
- mpi3mr_process_op_reply_q(mrioc, intr_info);
+ mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
- usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);
+ usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
(num_op_reply < mrioc->max_host_ios));
intr_info->op_reply_q->enable_irq_poll = false;
- enable_irq(pci_irq_vector(mrioc->pdev, midx));
+ enable_irq(intr_info->os_irq);
return IRQ_HANDLED;
}
+#endif
+
/**
* mpi3mr_request_irq - Request IRQ and register ISR
* @mrioc: Adapter instance reference
@@ -641,17 +721,42 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
snprintf(intr_info->name, MPI3MR_NAME_LENGTH, "%s%d-msix%d",
mrioc->driver_name, mrioc->id, index);
+#ifndef CONFIG_PREEMPT_RT
retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr,
mpi3mr_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+#else
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), mpi3mr_isr_primary,
+ NULL, IRQF_SHARED, intr_info->name, intr_info);
+#endif
if (retval) {
ioc_err(mrioc, "%s: Unable to allocate interrupt %d!\n",
intr_info->name, pci_irq_vector(pdev, index));
return retval;
}
+ intr_info->os_irq = pci_irq_vector(pdev, index);
return retval;
}
+static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
+{
+ if (!mrioc->requested_poll_qcount)
+ return;
+
+ /* Reserved for Admin and Default Queue */
+ if (max_vectors > 2 &&
+ (mrioc->requested_poll_qcount < max_vectors - 2)) {
+ ioc_info(mrioc,
+ "enabled polled queues (%d) msix (%d)\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ } else {
+ ioc_info(mrioc,
+ "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ mrioc->requested_poll_qcount = 0;
+ }
+}
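+
+/*
+ * A worked example of the check above: with max_vectors = 16, one
+ * vector remains reserved for the admin queue and one for the default
+ * operational queue, so up to 13 poll queues (requested_poll_qcount <
+ * 16 - 2) are honoured; requesting 14 or more drops the poll queue
+ * count to 0 with a log message.
+ */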
+
/**
* mpi3mr_setup_isr - Setup ISR for the controller
* @mrioc: Adapter instance reference
@@ -664,48 +769,72 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
{
unsigned int irq_flags = PCI_IRQ_MSIX;
- int max_vectors;
+ int max_vectors, min_vec;
int retval;
int i;
- struct irq_affinity desc = { .pre_vectors = 1};
+ struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };
+
+ if (mrioc->is_intr_info_set)
+ return 0;
mpi3mr_cleanup_isr(mrioc);
- if (setup_one || reset_devices)
+ if (setup_one || reset_devices) {
max_vectors = 1;
- else {
+ retval = pci_alloc_irq_vectors(mrioc->pdev,
+ 1, max_vectors, irq_flags);
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
+ } else {
max_vectors =
- min_t(int, mrioc->cpu_count + 1, mrioc->msix_count);
+ min_t(int, mrioc->cpu_count + 1 +
+ mrioc->requested_poll_qcount, mrioc->msix_count);
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
ioc_info(mrioc,
"MSI-X vectors supported: %d, no of cores: %d,",
mrioc->msix_count, mrioc->cpu_count);
ioc_info(mrioc,
- "MSI-x vectors requested: %d\n", max_vectors);
- }
+ "MSI-x vectors requested: %d poll_queues %d\n",
+ max_vectors, mrioc->requested_poll_qcount);
+
+ desc.post_vectors = mrioc->requested_poll_qcount;
+ min_vec = desc.pre_vectors + desc.post_vectors;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
+ min_vec, max_vectors, irq_flags, &desc);
+
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
- irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
- mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
- retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
- 1, max_vectors, irq_flags, &desc);
- if (retval < 0) {
- ioc_err(mrioc, "Cannot alloc irq vectors\n");
- goto out_failed;
- }
- if (retval != max_vectors) {
- ioc_info(mrioc,
- "allocated vectors (%d) are less than configured (%d)\n",
- retval, max_vectors);
/*
* If only one MSI-x is allocated, then MSI-x 0 will be shared
* between Admin queue and operational queue
*/
- if (retval == 1)
+ if (retval == min_vec)
mrioc->op_reply_q_offset = 0;
+ else if (retval != (max_vectors)) {
+ ioc_info(mrioc,
+ "allocated vectors (%d) are less than configured (%d)\n",
+ retval, max_vectors);
+ }
max_vectors = retval;
+ mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
+
}
+
mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
GFP_KERNEL);
if (!mrioc->intr_info) {
@@ -720,6 +849,8 @@ static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
goto out_failed;
}
}
+ if (reset_devices || !setup_one)
+ mrioc->is_intr_info_set = true;
mrioc->intr_info_count = max_vectors;
mpi3mr_ioc_enable_intr(mrioc);
return 0;
@@ -763,10 +894,10 @@ static const struct {
} mpi3mr_reset_reason_codes[] = {
{ MPI3MR_RESET_FROM_BRINGUP, "timeout in bringup" },
{ MPI3MR_RESET_FROM_FAULT_WATCH, "fault" },
- { MPI3MR_RESET_FROM_IOCTL, "application invocation" },
+ { MPI3MR_RESET_FROM_APP, "application invocation" },
{ MPI3MR_RESET_FROM_EH_HOS, "error handling" },
{ MPI3MR_RESET_FROM_TM_TIMEOUT, "TM timeout" },
- { MPI3MR_RESET_FROM_IOCTL_TIMEOUT, "IOCTL timeout" },
+ { MPI3MR_RESET_FROM_APP_TIMEOUT, "application command timeout" },
{ MPI3MR_RESET_FROM_MUR_FAILURE, "MUR failure" },
{ MPI3MR_RESET_FROM_CTLR_CLEANUP, "timeout in controller cleanup" },
{ MPI3MR_RESET_FROM_CIACTIV_FAULT, "component image activation fault" },
@@ -796,6 +927,9 @@ static const struct {
},
{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
+ { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
+ { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout"},
+ { MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT, "timeout of a SAS transport layer request" },
};
/**
@@ -860,7 +994,7 @@ static const char *mpi3mr_reset_type_name(u16 reset_type)
*
* Return: Nothing.
*/
-static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
{
u32 ioc_status, code, code1, code2, code3;
@@ -958,25 +1092,25 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
- timeout = mrioc->ready_timeout * 10;
+ timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
mpi3mr_clear_reset_history(mrioc);
- ioc_config =
- readl(&mrioc->sysif_regs->ioc_configuration);
- if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
- (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
- (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
- retval = 0;
- break;
- }
+ break;
+ }
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ mpi3mr_print_fault_info(mrioc);
+ break;
}
msleep(100);
} while (--timeout);
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
+ retval = 0;
ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
(!retval) ? "successful" : "failed", ioc_status, ioc_config);
@@ -984,32 +1118,194 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
+ * during reset/resume
+ * @mrioc: Adapter instance reference
+ *
+ * Return zero if the new IOCFacts parameters value is compatible with
+ * older values else return -EPERM
+ */
+static int
+mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+{
+ u16 dev_handle_bitmap_sz;
+ void *removepend_bitmap;
+
+ if (mrioc->facts.reply_sz > mrioc->reply_sz) {
+ ioc_err(mrioc,
+ "cannot increase reply size from %d to %d\n",
+ mrioc->reply_sz, mrioc->facts.reply_sz);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational reply queues from %d to %d\n",
+ mrioc->num_op_reply_q,
+ mrioc->facts.max_op_reply_q);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational request queues from %d to %d\n",
+ mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
+ return -EPERM;
+ }
+
+ if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
+ ioc_err(mrioc,
+ "critical error: multipath capability is enabled at the\n"
+ "\tcontroller while sas transport support is enabled at the\n"
+ "\tdriver, please reboot the system or reload the driver\n");
+
+ dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+ if (mrioc->facts.max_devhandle % 8)
+ dev_handle_bitmap_sz++;
+ if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
+ removepend_bitmap = krealloc(mrioc->removepend_bitmap,
+ dev_handle_bitmap_sz, GFP_KERNEL);
+ if (!removepend_bitmap) {
+ ioc_err(mrioc,
+ "failed to increase removepend_bitmap sz from: %d to %d\n",
+ mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ return -EPERM;
+ }
+ memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
+ dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
+ mrioc->removepend_bitmap = removepend_bitmap;
+ ioc_info(mrioc,
+ "increased dev_handle_bitmap_sz from %d to %d\n",
+ mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
+ }
+
+ return 0;
+}
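+
+/*
+ * A worked example of the bitmap sizing above: max_devhandle = 1025
+ * gives 1025 / 8 = 128 bytes plus one byte for the remainder, i.e.
+ * 129 bytes. krealloc() grows removepend_bitmap to that size and only
+ * the newly added tail is zeroed, preserving removal state already
+ * tracked for existing device handles.
+ */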
+
+/**
* mpi3mr_bring_ioc_ready - Bring controller to ready state
* @mrioc: Adapter instance reference
*
* Set Enable IOC bit in IOC configuration register and wait for
* the controller to become ready.
*
- * Return: 0 on success, -1 on failure.
+ * Return: 0 on success, appropriate error on failure.
*/
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
- u32 ioc_config, timeout;
- enum mpi3mr_iocstate current_state;
+ u32 ioc_config, ioc_status, timeout;
+ int retval = 0;
+ enum mpi3mr_iocstate ioc_state;
+ u64 base_info;
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
+ ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
+ ioc_status, ioc_config, base_info);
+
+ /* The timeout value is in units of 2 seconds; convert it to seconds. */
+ mrioc->ready_timeout =
+ ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
+ MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+
+ ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc, "controller is in %s state during detection\n",
+ mpi3mr_iocstate_name(ioc_state));
+
+ if (ioc_state == MRIOC_STATE_BECOMING_READY ||
+ ioc_state == MRIOC_STATE_RESET_REQUESTED) {
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ msleep(100);
+ } while (--timeout);
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present while waiting to reset\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc,
+ "controller is in %s state after waiting to reset\n",
+ mpi3mr_iocstate_name(ioc_state));
+ }
+
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
+ retval = mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_BRINGUP);
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (retval)
+ ioc_err(mrioc,
+ "message unit reset failed with error %d current state %s\n",
+ retval, mpi3mr_iocstate_name(ioc_state));
+ }
+ if (ioc_state != MRIOC_STATE_RESET) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc,
+ "soft reset failed with error %d\n", retval);
+ goto out_failed;
+ }
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_RESET) {
+ ioc_err(mrioc,
+ "cannot bring controller to reset state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+ goto out_failed;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ retval = mpi3mr_setup_admin_qpair(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to setup admin queues: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ ioc_info(mrioc, "bringing controller to ready state\n");
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
timeout = mrioc->ready_timeout * 10;
do {
- current_state = mpi3mr_get_iocstate(mrioc);
- if (current_state == MRIOC_STATE_READY)
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc,
+ "successfully transitioned to %s state\n",
+ mpi3mr_iocstate_name(ioc_state));
return 0;
+ }
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc,
+ "controller is not present at the bringup\n");
+ retval = -1;
+ goto out_device_not_present;
+ }
msleep(100);
} while (--timeout);
- return -1;
+out_failed:
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_err(mrioc,
+ "failed to bring to ready state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+out_device_not_present:
+ return retval;
}
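All of the wait loops in this function share one countdown idiom: the
timeout is kept in tenths of a second and each pass sleeps 100 ms. A
condensed sketch of the final ready wait, using only helpers already seen
above (wait_for_ready_state itself is hypothetical):

	static int wait_for_ready_state(struct mpi3mr_ioc *mrioc, u32 seconds)
	{
		u32 timeout = seconds * 10;	/* 100 ms ticks */

		do {
			if (mpi3mr_get_iocstate(mrioc) == MRIOC_STATE_READY)
				return 0;
			msleep(100);
		} while (--timeout);
		return -ETIMEDOUT;
	}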
/**
@@ -1026,7 +1322,6 @@ static inline bool
mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
{
if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
- (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
(ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
return true;
return false;
@@ -1049,8 +1344,10 @@ static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
return false;
fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
- if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
+ if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
+ mpi3mr_print_fault_info(mrioc);
return true;
+ }
return false;
}
@@ -1089,26 +1386,36 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
u32 reset_reason)
{
int retval = -1;
- u8 unlock_retry_count, reset_retry_count = 0;
- u32 host_diagnostic, timeout, ioc_status, ioc_config;
+ u8 unlock_retry_count = 0;
+ u32 host_diagnostic, ioc_status, ioc_config;
+ u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
- pci_cfg_access_lock(mrioc->pdev);
if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
(reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
- goto out;
+ return retval;
if (mrioc->unrecoverable)
- goto out;
-retry_reset:
- unlock_retry_count = 0;
+ return retval;
+ if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
+ retval = 0;
+ return retval;
+ }
+
+ ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ mpi3mr_reset_rc_name(reset_reason), reset_reason);
+
mpi3mr_clear_reset_history(mrioc);
do {
ioc_info(mrioc,
"Write magic sequence to unlock host diag register (retry=%d)\n",
++unlock_retry_count);
if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
- writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_err(mrioc,
+ "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ host_diagnostic);
mrioc->unrecoverable = 1;
- goto out;
+ return retval;
}
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
@@ -1133,31 +1440,26 @@ retry_reset:
} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
- ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
- mpi3mr_reset_type_name(reset_type),
- mpi3mr_reset_rc_name(reset_reason), reset_reason);
writel(host_diagnostic | reset_type,
&mrioc->sysif_regs->host_diagnostic);
- timeout = mrioc->ready_timeout * 10;
- if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
+ switch (reset_type) {
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- if (ioc_status &
- MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
+ && mpi3mr_soft_reset_success(ioc_status, ioc_config)
+ ) {
mpi3mr_clear_reset_history(mrioc);
- ioc_config =
- readl(&mrioc->sysif_regs->ioc_configuration);
- if (mpi3mr_soft_reset_success(ioc_status,
- ioc_config)) {
- retval = 0;
- break;
- }
+ retval = 0;
+ break;
}
msleep(100);
} while (--timeout);
- writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
- &mrioc->sysif_regs->write_sequence);
- } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
+ mpi3mr_print_fault_info(mrioc);
+ break;
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
@@ -1166,28 +1468,22 @@ retry_reset:
}
msleep(100);
} while (--timeout);
- mpi3mr_clear_reset_history(mrioc);
- writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
- &mrioc->sysif_regs->write_sequence);
- }
- if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) {
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
- ioc_info(mrioc,
- "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n",
- reset_retry_count, ioc_status, ioc_config);
- goto retry_reset;
+ break;
+ default:
+ break;
}
-out:
- pci_cfg_access_unlock(mrioc->pdev);
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_info(mrioc,
- "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n",
- (!retval) ? "successful" : "failed", ioc_status,
+ "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+ (!retval)?"successful":"failed", ioc_status,
ioc_config);
+ if (retval)
+ mrioc->unrecoverable = 1;
return retval;
}
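The unlock loop above reduces to the shape below; write_unlock_key_sequence()
is a hypothetical stand-in for the full magic-key writes (only the FLUSH
write is visible in this hunk), and exhausting
MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT attempts marks the controller
unrecoverable:

	do {
		if (++unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
			mrioc->unrecoverable = 1;
			return -1;
		}
		write_unlock_key_sequence(mrioc);	/* hypothetical helper */
		host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
	} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));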
@@ -1275,10 +1571,10 @@ static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
MPI3MR_MAX_SEG_LIST_SIZE,
mrioc->req_qinfo[q_idx].q_segment_list,
mrioc->req_qinfo[q_idx].q_segment_list_dma);
- mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
+ mrioc->req_qinfo[q_idx].q_segment_list = NULL;
}
} else
- size = mrioc->req_qinfo[q_idx].num_requests *
+ size = mrioc->req_qinfo[q_idx].segment_qd *
mrioc->facts.op_req_sz;
for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
@@ -1351,10 +1647,11 @@ static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
struct mpi3_delete_reply_queue_request delq_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
int retval = 0;
u16 reply_qid = 0, midx;
- reply_qid = mrioc->op_reply_qinfo[qidx].qid;
+ reply_qid = op_reply_q->qid;
midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
@@ -1364,6 +1661,9 @@ static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
goto out;
}
+ (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
+ mrioc->active_poll_qcount--;
+
memset(&delq_req, 0, sizeof(delq_req));
mutex_lock(&mrioc->init_cmds.mutex);
if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -1389,13 +1689,9 @@ static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue DelRepQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "delete reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
- mrioc->unrecoverable = 1;
-
retval = -1;
goto out_unlock;
}
@@ -1565,6 +1861,8 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
reply_qid = qidx + 1;
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ if (!mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
@@ -1592,8 +1890,26 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
create_req.queue_id = cpu_to_le16(reply_qid);
- create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
- create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index);
+
+ if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
+ op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
+ else
+ op_reply_q->qtype = MPI3MR_POLL_QUEUE;
+
+ if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
+ create_req.flags =
+ MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
+ create_req.msix_index =
+ cpu_to_le16(mrioc->intr_info[midx].msix_index);
+ } else {
+ create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
+ ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
+ reply_qid, midx);
+ if (!mrioc->active_poll_qcount)
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev,
+ mrioc->intr_info_count - 1));
+ }
+
if (mrioc->enable_segqueue) {
create_req.flags |=
MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
@@ -1615,12 +1931,9 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "CreateRepQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "create reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -1634,7 +1947,11 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
goto out_unlock;
}
op_reply_q->qid = reply_qid;
- mrioc->intr_info[midx].op_reply_q = op_reply_q;
+ if (midx < mrioc->intr_info_count)
+ mrioc->intr_info[midx].op_reply_q = op_reply_q;
+
+ (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
+ mrioc->active_poll_qcount++;
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
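To make the default/polled split above concrete, a hedged worked example:

	/*
	 * Worked example (hypothetical numbers): intr_info_count = 16,
	 * requested_poll_qcount = 4. Then midx 0..11 -> MPI3MR_DEFAULT_QUEUE
	 * (interrupt driven, one MSI-X vector each) and midx 12..15 ->
	 * MPI3MR_POLL_QUEUE. All four poll queues are registered against the
	 * last vector (msix_index 15), whose IRQ is disabled when the first
	 * poll queue is created.
	 */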
@@ -1722,12 +2039,9 @@ static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "CreateReqQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- if (mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
- MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT))
- mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "create request queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -1771,8 +2085,13 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
mrioc->intr_info_count - mrioc->op_reply_q_offset;
if (!mrioc->num_queues)
mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
- num_queues = mrioc->num_queues;
- ioc_info(mrioc, "Trying to create %d Operational Q pairs\n",
+ /*
+ * During reset set the num_queues to the number of queues
+ * that was set before the reset.
+ */
+ num_queues = mrioc->num_op_reply_q ?
+ mrioc->num_op_reply_q : mrioc->num_queues;
+ ioc_info(mrioc, "trying to create %d operational queue pairs\n",
num_queues);
if (!mrioc->req_qinfo) {
@@ -1814,8 +2133,10 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
goto out_failed;
}
mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
- ioc_info(mrioc, "Successfully created %d Operational Q pairs\n",
- mrioc->num_op_reply_q);
+ ioc_info(mrioc,
+ "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
+ mrioc->num_op_reply_q, mrioc->default_qcount,
+ mrioc->active_poll_qcount);
return retval;
out_failed:
@@ -1863,7 +2184,7 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
if (mpi3mr_check_req_qfull(op_req_q)) {
midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
reply_qidx, mrioc->op_reply_q_offset);
- mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]);
+ mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
if (mpi3mr_check_req_qfull(op_req_q)) {
retval = -EAGAIN;
@@ -1888,9 +2209,13 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
pi = 0;
op_req_q->pi = pi;
+#ifndef CONFIG_PREEMPT_RT
if (atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios)
> MPI3MR_IRQ_POLL_TRIGGER_IOCOUNT)
mrioc->op_reply_qinfo[reply_qidx].enable_irq_poll = true;
+#else
+ atomic_inc_return(&mrioc->op_reply_qinfo[reply_qidx].pend_ios);
+#endif
writel(op_req_q->pi,
&mrioc->sysif_regs->oper_queue_indexes[reply_qidx].producer_index);
@@ -1901,6 +2226,53 @@ out:
}
/**
+ * mpi3mr_check_rh_fault_ioc - check reset history and fault
+ * controller
+ * @mrioc: Adapter instance reference
+ * @reason_code: reason code for the fault.
+ *
+ * This routine saves a snapdump and faults the controller with
+ * the given reason code if it is not already in the fault state
+ * or asynchronously reset. It is used to handle initialization-time
+ * faults/resets/timeouts, where an immediate soft reset is not
+ * required.
+ *
+ * Return: None.
+ */
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
+{
+ u32 ioc_status, host_diagnostic, timeout;
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc, "controller is unrecoverable\n");
+ return;
+ }
+
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "controller is not present\n");
+ return;
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ return;
+ }
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ reason_code);
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ do {
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+}
+
+/**
* mpi3mr_sync_timestamp - Issue time stamp sync request
* @mrioc: Adapter reference
*
@@ -1945,8 +2317,9 @@ static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
mrioc->init_cmds.is_waiting = 0;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
retval = -1;
goto out_unlock;
}
@@ -1969,6 +2342,91 @@ out:
}
/**
+ * mpi3mr_print_pkg_ver - display controller fw package version
+ * @mrioc: Adapter reference
+ *
+ * Retrieve firmware package version from the component image
+ * header of the controller flash and display it.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ci_upload_request ci_upload;
+ int retval = -1;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ struct mpi3_ci_manifest_mpi *manifest;
+ u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(&ci_upload, 0, sizeof(ci_upload));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "sending get package version failed due to command in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
+ ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
+ ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
+ ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
+ ci_upload.segment_size = cpu_to_le32(data_len);
+
+ mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
+ data_dma);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
+ sizeof(ci_upload), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting get package version failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "get package version timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ == MPI3_IOCSTATUS_SUCCESS) {
+ manifest = (struct mpi3_ci_manifest_mpi *) data;
+ if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
+ ioc_info(mrioc,
+ "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
+ manifest->package_version.gen_major,
+ manifest->package_version.gen_minor,
+ manifest->package_version.phase_major,
+ manifest->package_version.phase_minor,
+ manifest->package_version.customer_id,
+ manifest->package_version.build_num);
+ }
+ }
+ retval = 0;
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data,
+ data_dma);
+ return retval;
+}
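This function follows the same synchronous admin-command pattern used by
every init-time request in this file; a condensed sketch with the error
paths trimmed, where req and reason_code stand for the per-command MPI3
request struct and reset reason code:

	mutex_lock(&mrioc->init_cmds.mutex);
	mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->init_cmds.is_waiting = 1;
	mrioc->init_cmds.callback = NULL;
	init_completion(&mrioc->init_cmds.done);
	retval = mpi3mr_admin_request_post(mrioc, &req, sizeof(req), 1);
	if (!retval)
		wait_for_completion_timeout(&mrioc->init_cmds.done,
		    MPI3MR_INTADMCMD_TIMEOUT * HZ);
	if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE))
		mpi3mr_check_rh_fault_ioc(mrioc, reason_code);
	mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->init_cmds.mutex);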
+
+/**
* mpi3mr_watchdog_work - watchdog thread to monitor faults
* @work: work struct
*
@@ -1984,50 +2442,79 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
container_of(work, struct mpi3mr_ioc, watchdog_work.work);
unsigned long flags;
enum mpi3mr_iocstate ioc_state;
- u32 fault, host_diagnostic;
+ u32 fault, host_diagnostic, ioc_status;
+ u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ if (!mrioc->unrecoverable && !pci_device_is_present(mrioc->pdev)) {
+ ioc_err(mrioc, "watchdog could not detect the controller\n");
+ mrioc->unrecoverable = 1;
+ }
+
+ if (mrioc->unrecoverable) {
+ ioc_err(mrioc,
+ "flush pending commands for unrecoverable controller\n");
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ return;
+ }
if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
+ if ((mrioc->prepare_for_reset) &&
+ ((mrioc->prepare_for_reset_timeout_counter++) >=
+ MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
+ return;
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
+ return;
+ }
+
/* Check for fault state every second and issue a soft reset if needed */
ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state == MRIOC_STATE_FAULT) {
- fault = readl(&mrioc->sysif_regs->fault) &
- MPI3_SYSIF_FAULT_CODE_MASK;
- host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
- if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
- if (!mrioc->diagsave_timeout) {
- mpi3mr_print_fault_info(mrioc);
- ioc_warn(mrioc, "Diag save in progress\n");
- }
- if ((mrioc->diagsave_timeout++) <=
- MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
- goto schedule_work;
- } else
- mpi3mr_print_fault_info(mrioc);
- mrioc->diagsave_timeout = 0;
+ if (ioc_state != MRIOC_STATE_FAULT)
+ goto schedule_work;
- if (fault == MPI3_SYSIF_FAULT_CODE_FACTORY_RESET) {
- ioc_info(mrioc,
- "Factory Reset fault occurred marking controller as unrecoverable"
- );
- mrioc->unrecoverable = 1;
- goto out;
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
+ if (!mrioc->diagsave_timeout) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_warn(mrioc, "diag save in progress\n");
}
+ if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
+ goto schedule_work;
+ }
- if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
- (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
- (mrioc->reset_in_progress))
- goto out;
- if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
- else
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_FAULT_WATCH, 0);
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->diagsave_timeout = 0;
+
+ switch (fault) {
+ case MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED:
+ case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
+ ioc_warn(mrioc,
+ "controller requires system power cycle, marking controller as unrecoverable\n");
+ mrioc->unrecoverable = 1;
+ goto schedule_work;
+ case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
+ return;
+ case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
+ reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
+ break;
+ default:
+ break;
}
+ mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
+ return;
schedule_work:
spin_lock_irqsave(&mrioc->watchdog_lock, flags);
@@ -2036,7 +2523,6 @@ schedule_work:
&mrioc->watchdog_work,
msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
-out:
return;
}
@@ -2097,41 +2583,6 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
}
/**
- * mpi3mr_kill_ioc - Kill the controller
- * @mrioc: Adapter instance reference
- * @reason: reason for the failure.
- *
- * If fault debug is enabled, display the fault info else issue
- * diag fault and freeze the system for controller debug
- * purpose.
- *
- * Return: Nothing.
- */
-static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
-{
- enum mpi3mr_iocstate ioc_state;
-
- if (!mrioc->fault_dbg)
- return;
-
- dump_stack();
-
- ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state == MRIOC_STATE_FAULT)
- mpi3mr_print_fault_info(mrioc);
- else {
- ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
- reason);
- mpi3mr_diagfault_reset_handler(mrioc, reason);
- }
- if (mrioc->fault_dbg == 2)
- for (;;)
- ;
- else
- panic("panic in %s\n", __func__);
-}
-
-/**
* mpi3mr_setup_admin_qpair - Setup admin queue pair
* @mrioc: Adapter instance reference
*
@@ -2258,12 +2709,9 @@ static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue IOCFacts: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "ioc_facts timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -2277,6 +2725,7 @@ static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
goto out_unlock;
}
memcpy(facts_data, (u8 *)data, data_len);
+ mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
@@ -2369,19 +2818,18 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
- mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_request);
+ mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
mrioc->facts.product_id = le16_to_cpu(facts_data->product_id);
mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
- mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds);
mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
- mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds);
- mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds);
+ mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
+ mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
mrioc->facts.max_pcie_switches =
- le16_to_cpu(facts_data->max_pc_ie_switches);
+ le16_to_cpu(facts_data->max_pcie_switches);
mrioc->facts.max_sasexpanders =
le16_to_cpu(facts_data->max_sas_expanders);
mrioc->facts.max_sasinitiators =
@@ -2411,26 +2859,47 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.shutdown_timeout =
le16_to_cpu(facts_data->shutdown_timeout);
+ mrioc->facts.max_dev_per_tg =
+ facts_data->max_devices_per_throttle_group;
+ mrioc->facts.io_throttle_data_length =
+ le16_to_cpu(facts_data->io_throttle_data_length);
+ mrioc->facts.max_io_throttle_group =
+ le16_to_cpu(facts_data->max_io_throttle_group);
+ mrioc->facts.io_throttle_low = le16_to_cpu(facts_data->io_throttle_low);
+ mrioc->facts.io_throttle_high =
+ le16_to_cpu(facts_data->io_throttle_high);
+
+ /* Store as a count of 512-byte blocks */
+ if (mrioc->facts.io_throttle_data_length)
+ mrioc->io_throttle_data_length =
+ (mrioc->facts.io_throttle_data_length * 2 * 4);
+ else
+ /* set the length to 1 MiB + 1 KiB to disable throttling */
+ mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+
+ mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
+ mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
+
ioc_info(mrioc, "ioc_num(%d), maxopQ(%d), maxopRepQ(%d), maxdh(%d),",
mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
ioc_info(mrioc,
- "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
+ "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
- mrioc->facts.max_pds, mrioc->facts.max_msix_vectors,
- mrioc->facts.max_perids);
+ mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
mrioc->facts.sge_mod_shift);
ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
mrioc->facts.dma_mask, (facts_flags &
MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
-
- mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
-
- if (reset_devices)
- mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
- MPI3MR_HOST_IOS_KDUMP);
+ ioc_info(mrioc,
+ "max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
+ mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
+ ioc_info(mrioc,
+ "io_throttle_data_len(%dKiB), io_throttle_high(%dMiB), io_throttle_low(%dMiB)\n",
+ mrioc->facts.io_throttle_data_length * 4,
+ mrioc->facts.io_throttle_high, mrioc->facts.io_throttle_low);
}
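A worked example of the unit conversions above, using hypothetical firmware
values; per the log messages, the facts report io_throttle_data_length in
4 KiB units and the watermarks in MiB, and the driver keeps everything as
512-byte block counts:

	/*
	 * facts.io_throttle_data_length = 64 -> 64 * 2 * 4    = 512 blocks   (256 KiB)
	 * facts.io_throttle_high        = 16 -> 16 * 2 * 1024 = 32768 blocks (16 MiB)
	 * facts.io_throttle_data_length = 0  -> MPI3MR_MAX_SECTORS + 2, i.e. just
	 *     above 1 MiB per the in-code comment, so throttling never triggers
	 */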
/**
@@ -2446,26 +2915,48 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
u32 sz, i;
- dma_addr_t phy_addr;
if (mrioc->init_cmds.reply)
- goto post_reply_sbuf;
+ return retval;
- mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
if (!mrioc->init_cmds.reply)
goto out_failed;
+ mrioc->bsg_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->bsg_cmds.reply)
+ goto out_failed;
+
+ mrioc->transport_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->transport_cmds.reply)
+ goto out_failed;
+
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
- mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
+ mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
GFP_KERNEL);
if (!mrioc->dev_rmhs_cmds[i].reply)
goto out_failed;
}
- mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds[i].reply)
+ goto out_failed;
+ }
+
+ mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
if (!mrioc->host_tm_cmds.reply)
goto out_failed;
+ mrioc->pel_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->pel_cmds.reply)
+ goto out_failed;
+
+ mrioc->pel_abort_cmd.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
+ if (!mrioc->pel_abort_cmd.reply)
+ goto out_failed;
+
mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
if (mrioc->facts.max_devhandle % 8)
mrioc->dev_handle_bitmap_sz++;
@@ -2482,13 +2973,21 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->devrem_bitmap)
goto out_failed;
+ mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
+ if (MPI3MR_NUM_EVTACKCMD % 8)
+ mrioc->evtack_cmds_bitmap_sz++;
+ mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds_bitmap)
+ goto out_failed;
+
mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
/* reply buffer pool, 16 byte align */
- sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
&mrioc->pdev->dev, sz, 16, 0);
if (!mrioc->reply_buf_pool) {
@@ -2517,7 +3016,7 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
goto out_failed;
/* sense buffer pool, 4 byte align */
- sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
&mrioc->pdev->dev, sz, 4, 0);
if (!mrioc->sense_buf_pool) {
@@ -2542,21 +3041,42 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->sense_buf_q)
goto out_failed;
-post_reply_sbuf:
- sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ return retval;
+
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpimr_initialize_reply_sbuf_queues - initialize reply and sense
+ * buffer queues
+ * @mrioc: Adapter instance reference
+ *
+ * Helper function to initialize reply and sense buffers along
+ * with some debug prints.
+ *
+ * Return: None.
+ */
+static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
+{
+ u32 sz, i;
+ dma_addr_t phy_addr;
+
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
ioc_info(mrioc,
"reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
- mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
+ mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
(sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
sz = mrioc->reply_free_qsz * 8;
ioc_info(mrioc,
"reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
(unsigned long long)mrioc->reply_free_q_dma);
- sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
ioc_info(mrioc,
"sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
- mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
+ mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
(sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
sz = mrioc->sense_buf_q_sz * 8;
ioc_info(mrioc,
@@ -2566,20 +3086,15 @@ post_reply_sbuf:
/* initialize Reply buffer Queue */
for (i = 0, phy_addr = mrioc->reply_buf_dma;
- i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
+ i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
mrioc->reply_free_q[i] = cpu_to_le64(0);
/* initialize Sense Buffer Queue */
for (i = 0, phy_addr = mrioc->sense_buf_dma;
- i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ)
+ i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
mrioc->sense_buf_q[i] = cpu_to_le64(0);
- return retval;
-
-out_failed:
- retval = -1;
- return retval;
}
/**
@@ -2606,6 +3121,8 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
retval = -1;
goto out;
}
+ mpimr_initialize_reply_sbuf_queues(mrioc);
+
drv_info->information_length = cpu_to_le32(data_len);
strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
@@ -2639,7 +3156,7 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
iocinit_req.reply_free_queue_address =
cpu_to_le64(mrioc->reply_free_q_dma);
- iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ);
+ iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
iocinit_req.sense_buffer_free_queue_depth =
cpu_to_le16(mrioc->sense_buf_q_sz);
iocinit_req.sense_buffer_free_queue_address =
@@ -2659,12 +3176,9 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
- mrioc->unrecoverable = 1;
- ioc_err(mrioc, "Issue IOCInit: command timed out\n");
+ ioc_err(mrioc, "ioc_init timed out\n");
retval = -1;
goto out_unlock;
}
@@ -2678,6 +3192,13 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
goto out_unlock;
}
+ mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+
+ mrioc->sbq_host_index = mrioc->num_sense_bufs;
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
@@ -2755,12 +3276,9 @@ static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "event notification timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -2782,17 +3300,17 @@ out:
}
/**
- * mpi3mr_send_event_ack - Send event acknowledgment
+ * mpi3mr_process_event_ack - Process event acknowledgment
* @mrioc: Adapter instance reference
* @event: MPI3 event ID
- * @event_ctx: Event context
+ * @event_ctx: event context
*
* Send event acknowledgment through admin queue and wait for
* it to complete.
*
* Return: 0 on success, non-zero on failures.
*/
-int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
u32 event_ctx)
{
struct mpi3_event_ack_request evtack_req;
@@ -2825,8 +3343,9 @@ int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
retval = -1;
goto out_unlock;
}
@@ -2863,6 +3382,9 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
u32 sz, i;
u16 num_chains;
+ if (mrioc->chain_sgl_list)
+ return retval;
+
num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
@@ -2918,10 +3440,13 @@ out_failed:
static void mpi3mr_port_enable_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd)
{
- drv_cmd->state = MPI3MR_CMD_NOTUSED;
drv_cmd->callback = NULL;
- mrioc->scan_failed = drv_cmd->ioc_status;
mrioc->scan_started = 0;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ else
+ mrioc->scan_failed = drv_cmd->ioc_status;
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
}
/**
@@ -2966,29 +3491,28 @@ int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
goto out_unlock;
}
- if (!async) {
- wait_for_completion_timeout(&mrioc->init_cmds.done,
- (pe_timeout * HZ));
- if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue PortEnable: command timed out\n");
- retval = -1;
- mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
- MPI3MR_RESET_FROM_PE_TIMEOUT);
- mrioc->unrecoverable = 1;
- goto out_unlock;
- }
- mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+ if (async) {
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "port enable timed out\n");
+ retval = -1;
+ mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
+ goto out_unlock;
}
+ mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+
out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
out:
return retval;
}
-/* Protocol type to name mapper structure*/
+/* Protocol type to name mapper structure */
static const struct {
u8 protocol;
char *name;
@@ -3004,6 +3528,7 @@ static const struct {
char *name;
} mpi3mr_capabilities[] = {
{ MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE, "RAID" },
+ { MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED, "MultiPath" },
};
/**
@@ -3181,6 +3706,10 @@ int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
mrioc->sysif_regs, memap_sz);
ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
mrioc->msix_count);
+
+ if (!reset_devices && poll_queues > 0)
+ mrioc->requested_poll_qcount = min_t(int, poll_queues,
+ mrioc->msix_count - 2);
return retval;
out_failed:
@@ -3189,9 +3718,48 @@ out_failed:
}
/**
+ * mpi3mr_enable_events - Enable required events
+ * @mrioc: Adapter instance reference
+ *
+ * This routine unmasks the events required by the driver by
+ * sennding appropriate event mask bitmapt through an event
+ * notification request.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 i;
+
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+ if (retval)
+ ioc_err(mrioc, "failed to issue event notification %d\n",
+ retval);
+ return retval;
+}
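All event_masks words start at -1 (everything masked); each
mpi3mr_unmask_events() call then clears one bit. A sketch of what that
amounts to, assuming the usual word/bit split over the array (the driver's
actual helper may differ):

	static void unmask_event(u32 *event_masks, u16 event)
	{
		if (event >= MPI3_EVENT_NOTIFY_EVENTMASK_WORDS * 32)
			return;
		event_masks[event / 32] &= ~(1U << (event % 32));
	}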
+
+/**
* mpi3mr_init_ioc - Initialize the controller
* @mrioc: Adapter instance reference
- * @init_type: Flag to indicate is the init_type
*
 * This is the controller initialization routine, executed either
* after soft reset or from pci probe callback.
@@ -3204,227 +3772,334 @@ out_failed:
*
* Return: 0 on success and non-zero on failure.
*/
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
- enum mpi3mr_iocstate ioc_state;
- u64 base_info;
- u32 timeout;
- u32 ioc_status, ioc_config, i;
+ u8 retry = 0;
struct mpi3_ioc_facts_data facts_data;
+ u32 sz;
- mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
- mrioc->change_count = 0;
- if (init_type == MPI3MR_IT_INIT) {
- mrioc->cpu_count = num_online_cpus();
- retval = mpi3mr_setup_resources(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to setup resources:error %d\n",
- retval);
- goto out_nocleanup;
- }
+retry_init:
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ retval);
+ goto out_failed_noretry;
}
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
- ioc_info(mrioc, "SOD status %x configuration %x\n",
- ioc_status, ioc_config);
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
+ retval);
+ goto out_failed;
+ }
- base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
- ioc_info(mrioc, "SOD base_info %llx\n", base_info);
+ mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
- /*The timeout value is in 2sec unit, changing it to seconds*/
- mrioc->ready_timeout =
- ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
- MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+ mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
+ atomic_set(&mrioc->pend_large_data_sz, 0);
- ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout);
+ if (reset_devices)
+ mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
+ MPI3MR_HOST_IOS_KDUMP);
- ioc_state = mpi3mr_get_iocstate(mrioc);
- ioc_info(mrioc, "IOC in %s state during detection\n",
- mpi3mr_iocstate_name(ioc_state));
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED)) {
+ mrioc->sas_transport_enabled = 1;
+ mrioc->scsi_device_channel = 1;
+ mrioc->shost->max_channel = 1;
+ mrioc->shost->transportt = mpi3mr_transport_template;
+ }
- if (ioc_state == MRIOC_STATE_BECOMING_READY ||
- ioc_state == MRIOC_STATE_RESET_REQUESTED) {
- timeout = mrioc->ready_timeout * 10;
- do {
- msleep(100);
- } while (--timeout);
+ mrioc->reply_sz = mrioc->facts.reply_sz;
- ioc_state = mpi3mr_get_iocstate(mrioc);
- ioc_info(mrioc,
- "IOC in %s state after waiting for reset time\n",
- mpi3mr_iocstate_name(ioc_state));
+ retval = mpi3mr_check_reset_dma_mask(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Resetting dma mask failed %d\n",
+ retval);
+ goto out_failed_noretry;
}
- if (ioc_state == MRIOC_STATE_READY) {
- retval = mpi3mr_issue_and_process_mur(mrioc,
- MPI3MR_RESET_FROM_BRINGUP);
- if (retval) {
- ioc_err(mrioc, "Failed to MU reset IOC error %d\n",
- retval);
- }
- ioc_state = mpi3mr_get_iocstate(mrioc);
+ mpi3mr_print_ioc_info(mrioc);
+
+ dprint_init(mrioc, "allocating config page buffers\n");
+ mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+ MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
+ if (!mrioc->cfg_page)
+ goto out_failed_noretry;
+
+ mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
+
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed_noretry;
}
- if (ioc_state != MRIOC_STATE_RESET) {
- mpi3mr_print_fault_info(mrioc);
- retval = mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
- MPI3MR_RESET_FROM_BRINGUP);
- if (retval) {
- ioc_err(mrioc,
- "%s :Failed to soft reset IOC error %d\n",
- __func__, retval);
- goto out_failed;
- }
+
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed_noretry;
}
- ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state != MRIOC_STATE_RESET) {
- retval = -1;
- ioc_err(mrioc, "Cannot bring IOC to reset state\n");
+
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
+ retval);
goto out_failed;
}
- retval = mpi3mr_setup_admin_qpair(mrioc);
+ retval = mpi3mr_print_pkg_ver(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to get package version\n");
+ goto out_failed;
+ }
+
+ retval = mpi3mr_setup_isr(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_create_op_queues(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to setup admin Qs: error %d\n",
+ ioc_err(mrioc, "Failed to create OpQueues error %d\n",
retval);
goto out_failed;
}
- retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (!mrioc->pel_seqnum_virt) {
+ dprint_init(mrioc, "allocating memory for pel_seqnum_virt\n");
+ mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
+ mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
+ GFP_KERNEL);
+ if (!mrioc->pel_seqnum_virt) {
+ retval = -ENOMEM;
+ goto out_failed_noretry;
+ }
+ }
+
+ if (!mrioc->throttle_groups && mrioc->num_io_throttle_group) {
+ dprint_init(mrioc, "allocating memory for throttle groups\n");
+ sz = sizeof(struct mpi3mr_throttle_group_info);
+ mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+ if (!mrioc->throttle_groups)
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_enable_events(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ ioc_err(mrioc, "failed to enable events %d\n",
retval);
goto out_failed;
}
- if (init_type != MPI3MR_IT_RESET) {
+ ioc_info(mrioc, "controller initialization completed successfully\n");
+ return retval;
+out_failed:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
+ retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+out_failed_noretry:
+ ioc_err(mrioc, "controller initialization failed\n");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
+ return retval;
+}
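Both this function and mpi3mr_reinit_ioc() below wrap their steps in the
same bounded retry: recoverable failures wipe the driver-side buffers and
retry up to twice, while the *_noretry paths diag-fault the controller and
mark it unrecoverable. A control-flow sketch (do_init_steps() is a
hypothetical stand-in for the sequence of init calls):

	static int init_with_retries(struct mpi3mr_ioc *mrioc)
	{
		int retval;
		u8 retry;

		for (retry = 0; retry < 3; retry++) {
			retval = do_init_steps(mrioc);	/* hypothetical body */
			if (!retval)
				return 0;
			mpi3mr_memset_buffers(mrioc);	/* wipe state before retrying */
		}
		/* out of retries: fault the controller for debug and give up */
		mpi3mr_issue_reset(mrioc,
		    MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
		    MPI3MR_RESET_FROM_CTLR_CLEANUP);
		mrioc->unrecoverable = 1;
		return retval;
	}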
+
+/**
+ * mpi3mr_reinit_ioc - Re-Initialize the controller
+ * @mrioc: Adapter instance reference
+ * @is_resume: Called from resume or reset path
+ *
+ * This is the controller re-initialization routine, executed from
+ * the soft reset handler or resume callback. Creates
+ * operational reply queue pairs, allocate required memory for
+ * reply pool, sense buffer pool, issue IOC init request to the
+ * firmware, unmask the events and issue port enable to discover
+ * SAS/SATA/NVMe devices and RAID volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
+{
+ int retval = 0;
+ u8 retry = 0;
+ struct mpi3_ioc_facts_data facts_data;
+ u32 pe_timeout, ioc_status;
+
+retry_init:
+ pe_timeout =
+ (MPI3MR_PORTENABLE_TIMEOUT / MPI3MR_PORTENABLE_POLL_INTERVAL);
+
+ dprint_reset(mrioc, "bringing up the controller to ready state\n");
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to bring to ready state\n");
+ goto out_failed_noretry;
+ }
+
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
if (retval) {
- ioc_err(mrioc, "Failed to setup ISR error %d\n",
- retval);
- goto out_failed;
+ ioc_err(mrioc, "failed to setup ISR\n");
+ goto out_failed_noretry;
}
} else
mpi3mr_ioc_enable_intr(mrioc);
+ dprint_reset(mrioc, "getting ioc_facts\n");
retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
if (retval) {
- ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
- retval);
+ ioc_err(mrioc, "failed to get ioc_facts\n");
goto out_failed;
}
- mpi3mr_process_factsdata(mrioc, &facts_data);
- if (init_type == MPI3MR_IT_INIT) {
- retval = mpi3mr_check_reset_dma_mask(mrioc);
- if (retval) {
- ioc_err(mrioc, "Resetting dma mask failed %d\n",
- retval);
- goto out_failed;
- }
+ dprint_reset(mrioc, "validating ioc_facts\n");
+ retval = mpi3mr_revalidate_factsdata(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
+ goto out_failed_noretry;
}
mpi3mr_print_ioc_info(mrioc);
- retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ dprint_reset(mrioc, "sending ioc_init\n");
+ retval = mpi3mr_issue_iocinit(mrioc);
if (retval) {
- ioc_err(mrioc,
- "%s :Failed to allocated reply sense buffers %d\n",
- __func__, retval);
+ ioc_err(mrioc, "failed to send ioc_init\n");
goto out_failed;
}
- if (init_type == MPI3MR_IT_INIT) {
- retval = mpi3mr_alloc_chain_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
- retval);
- goto out_failed;
- }
- }
-
- retval = mpi3mr_issue_iocinit(mrioc);
+ dprint_reset(mrioc, "getting package version\n");
+ retval = mpi3mr_print_pkg_ver(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
- retval);
+ ioc_err(mrioc, "failed to get package version\n");
goto out_failed;
}
- mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
- writel(mrioc->reply_free_queue_host_index,
- &mrioc->sysif_regs->reply_free_host_index);
- mrioc->sbq_host_index = mrioc->num_sense_bufs;
- writel(mrioc->sbq_host_index,
- &mrioc->sysif_regs->sense_buffer_free_host_index);
-
- if (init_type != MPI3MR_IT_RESET) {
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up multiple ISR\n");
retval = mpi3mr_setup_isr(mrioc, 0);
if (retval) {
- ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
- retval);
- goto out_failed;
+ ioc_err(mrioc, "failed to re-setup ISR\n");
+ goto out_failed_noretry;
}
}
+ dprint_reset(mrioc, "creating operational queue pairs\n");
retval = mpi3mr_create_op_queues(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to create OpQueues error %d\n",
- retval);
+ ioc_err(mrioc, "failed to create operational queue pairs\n");
goto out_failed;
}
- if ((init_type != MPI3MR_IT_INIT) &&
- (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
- retval = -1;
+ if (!mrioc->pel_seqnum_virt) {
+ dprint_reset(mrioc, "allocating memory for pel_seqnum_virt\n");
+ mrioc->pel_seqnum_sz = sizeof(struct mpi3_pel_seq);
+ mrioc->pel_seqnum_virt = dma_alloc_coherent(&mrioc->pdev->dev,
+ mrioc->pel_seqnum_sz, &mrioc->pel_seqnum_dma,
+ GFP_KERNEL);
+ if (!mrioc->pel_seqnum_virt) {
+ retval = -ENOMEM;
+ goto out_failed_noretry;
+ }
+ }
+
+ if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
ioc_err(mrioc,
- "Cannot create minimum number of OpQueues expected:%d created:%d\n",
+ "cannot create minimum number of operational queues expected:%d created:%d\n",
mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
- goto out_failed;
+ goto out_failed_noretry;
}
- for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
- mrioc->event_masks[i] = -1;
+ dprint_reset(mrioc, "enabling events\n");
+ retval = mpi3mr_enable_events(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to enable events\n");
+ goto out_failed;
+ }
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+ mrioc->device_refresh_on = 1;
+ mpi3mr_add_event_wait_for_device_refresh(mrioc);
- retval = mpi3mr_issue_event_notification(mrioc);
+ ioc_info(mrioc, "sending port enable\n");
+ retval = mpi3mr_issue_port_enable(mrioc, 1);
if (retval) {
- ioc_err(mrioc, "Failed to issue event notification %d\n",
- retval);
+ ioc_err(mrioc, "failed to issue port enable\n");
goto out_failed;
}
-
- if (init_type != MPI3MR_IT_INIT) {
- ioc_info(mrioc, "Issuing Port Enable\n");
- retval = mpi3mr_issue_port_enable(mrioc, 0);
- if (retval) {
- ioc_err(mrioc, "Failed to issue port enable %d\n",
- retval);
+ do {
+ ssleep(MPI3MR_PORTENABLE_POLL_INTERVAL);
+ if (mrioc->init_cmds.state == MPI3MR_CMD_NOTUSED)
+ break;
+ if (!pci_device_is_present(mrioc->pdev))
+ mrioc->unrecoverable = 1;
+ if (mrioc->unrecoverable) {
+ retval = -1;
+ goto out_failed_noretry;
+ }
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
goto out_failed;
}
- }
- return retval;
+ } while (--pe_timeout);
+ if (!pe_timeout) {
+ ioc_err(mrioc, "port enable timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ goto out_failed;
+ } else if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+ } else
+ ioc_info(mrioc, "port enable completed successfully\n");
+
+ ioc_info(mrioc, "controller %s completed successfully\n",
+ (is_resume)?"resume":"re-initialization");
+ return retval;
out_failed:
- if (init_type == MPI3MR_IT_INIT)
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
- else
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
-out_nocleanup:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
+ (is_resume)?"resume":"re-initialization", retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+out_failed_noretry:
+ ioc_err(mrioc, "controller %s is failed\n",
+ (is_resume)?"resume":"re-initialization");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
return retval;
}
@@ -3487,18 +4162,39 @@ static void mpi3mr_memset_op_req_q_buffers(struct mpi3mr_ioc *mrioc, u16 qidx)
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
u16 i;
+ struct mpi3mr_throttle_group_info *tg;
- memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
- memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
-
- memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
- memset(mrioc->host_tm_cmds.reply, 0,
- sizeof(*mrioc->host_tm_cmds.reply));
- for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
- memset(mrioc->dev_rmhs_cmds[i].reply, 0,
- sizeof(*mrioc->dev_rmhs_cmds[i].reply));
- memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
- memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ mrioc->change_count = 0;
+ mrioc->active_poll_qcount = 0;
+ mrioc->default_qcount = 0;
+ if (mrioc->admin_req_base)
+ memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ if (mrioc->admin_reply_base)
+ memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+
+ if (mrioc->init_cmds.reply) {
+ memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+ memset(mrioc->bsg_cmds.reply, 0,
+ sizeof(*mrioc->bsg_cmds.reply));
+ memset(mrioc->host_tm_cmds.reply, 0,
+ sizeof(*mrioc->host_tm_cmds.reply));
+ memset(mrioc->pel_cmds.reply, 0,
+ sizeof(*mrioc->pel_cmds.reply));
+ memset(mrioc->pel_abort_cmd.reply, 0,
+ sizeof(*mrioc->pel_abort_cmd.reply));
+ memset(mrioc->transport_cmds.reply, 0,
+ sizeof(*mrioc->transport_cmds.reply));
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ memset(mrioc->dev_rmhs_cmds[i].reply, 0,
+ sizeof(*mrioc->dev_rmhs_cmds[i].reply));
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ memset(mrioc->evtack_cmds[i].reply, 0,
+ sizeof(*mrioc->evtack_cmds[i].reply));
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ memset(mrioc->evtack_cmds_bitmap, 0,
+ mrioc->evtack_cmds_bitmap_sz);
+ }
for (i = 0; i < mrioc->num_queues; i++) {
mrioc->op_reply_qinfo[i].qid = 0;
@@ -3517,6 +4213,22 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
spin_lock_init(&mrioc->req_qinfo[i].q_lock);
mpi3mr_memset_op_req_q_buffers(mrioc, i);
}
+
+ atomic_set(&mrioc->pend_large_data_sz, 0);
+ if (mrioc->throttle_groups) {
+ tg = mrioc->throttle_groups;
+ for (i = 0; i < mrioc->num_io_throttle_group; i++, tg++) {
+ tg->id = 0;
+ tg->fw_qd = 0;
+ tg->modified_qd = 0;
+ tg->io_divert = 0;
+ tg->need_qd_reduction = 0;
+ tg->high = 0;
+ tg->low = 0;
+ tg->qd_reduction = 0;
+ atomic_set(&tg->pend_large_data_sz, 0);
+ }
+ }
}
/**
@@ -3527,11 +4239,13 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
*
* Return: Nothing.
*/
-static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
u16 i;
struct mpi3mr_intr_info *intr_info;
+ mpi3mr_free_enclosure_list(mrioc);
+
if (mrioc->sense_buf_pool) {
if (mrioc->sense_buf)
dma_pool_free(mrioc->sense_buf_pool, mrioc->sense_buf,
@@ -3588,18 +4302,38 @@ static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
kfree(mrioc->init_cmds.reply);
mrioc->init_cmds.reply = NULL;
+ kfree(mrioc->bsg_cmds.reply);
+ mrioc->bsg_cmds.reply = NULL;
+
kfree(mrioc->host_tm_cmds.reply);
mrioc->host_tm_cmds.reply = NULL;
+ kfree(mrioc->pel_cmds.reply);
+ mrioc->pel_cmds.reply = NULL;
+
+ kfree(mrioc->pel_abort_cmd.reply);
+ mrioc->pel_abort_cmd.reply = NULL;
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ kfree(mrioc->evtack_cmds[i].reply);
+ mrioc->evtack_cmds[i].reply = NULL;
+ }
+
kfree(mrioc->removepend_bitmap);
mrioc->removepend_bitmap = NULL;
kfree(mrioc->devrem_bitmap);
mrioc->devrem_bitmap = NULL;
+ kfree(mrioc->evtack_cmds_bitmap);
+ mrioc->evtack_cmds_bitmap = NULL;
+
kfree(mrioc->chain_bitmap);
mrioc->chain_bitmap = NULL;
+ kfree(mrioc->transport_cmds.reply);
+ mrioc->transport_cmds.reply = NULL;
+
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
kfree(mrioc->dev_rmhs_cmds[i].reply);
mrioc->dev_rmhs_cmds[i].reply = NULL;
@@ -3631,6 +4365,16 @@ static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
mrioc->admin_req_base, mrioc->admin_req_dma);
mrioc->admin_req_base = NULL;
}
+
+ if (mrioc->pel_seqnum_virt) {
+ dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
+ mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
+ mrioc->pel_seqnum_virt = NULL;
+ }
+
+ kfree(mrioc->logdata_buf);
+ mrioc->logdata_buf = NULL;
+
}
/**
@@ -3663,7 +4407,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
- ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN;
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
@@ -3699,21 +4443,17 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
/**
* mpi3mr_cleanup_ioc - Cleanup controller
* @mrioc: Adapter instance reference
- * @reason: Cleanup reason
*
 * Controller cleanup handler: a message unit reset or soft reset
- * and shutdown notification is issued to the controller and the
- * associated memory resources are freed.
+ * and a shutdown notification are issued to the controller.
*
* Return: Nothing.
*/
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
enum mpi3mr_iocstate ioc_state;
- if (reason == MPI3MR_COMPLETE_CLEANUP)
- mpi3mr_stop_watchdog(mrioc);
-
+ dprint_exit(mrioc, "cleaning up the controller\n");
mpi3mr_ioc_disable_intr(mrioc);
ioc_state = mpi3mr_get_iocstate(mrioc);
@@ -3725,15 +4465,9 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
mpi3mr_issue_reset(mrioc,
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
MPI3MR_RESET_FROM_MUR_FAILURE);
-
- if (reason != MPI3MR_REINIT_FAILURE)
- mpi3mr_issue_ioc_shutdown(mrioc);
- }
-
- if (reason == MPI3MR_COMPLETE_CLEANUP) {
- mpi3mr_free_mem(mrioc);
- mpi3mr_cleanup_resources(mrioc);
+ mpi3mr_issue_ioc_shutdown(mrioc);
}
+ dprint_exit(mrioc, "controller cleanup completed\n");
}
/**
@@ -3768,13 +4502,19 @@ static inline void mpi3mr_drv_cmd_comp_reset(struct mpi3mr_ioc *mrioc,
*
* Return: Nothing.
*/
-static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
+void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
{
struct mpi3mr_drv_cmd *cmdptr;
u8 i;
cmdptr = &mrioc->init_cmds;
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->cfg_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->bsg_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
cmdptr = &mrioc->host_tm_cmds;
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
@@ -3782,41 +4522,261 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
cmdptr = &mrioc->dev_rmhs_cmds[i];
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}
+
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ cmdptr = &mrioc->evtack_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+ }
+
+ cmdptr = &mrioc->pel_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->pel_abort_cmd;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
+
+ cmdptr = &mrioc->transport_cmds;
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}
/**
- * mpi3mr_diagfault_reset_handler - Diag fault reset handler
+ * mpi3mr_pel_wait_post - Issue PEL Wait
* @mrioc: Adapter instance reference
- * @reset_reason: Reset reason code
+ * @drv_cmd: Internal command tracker
*
- * This is an handler for issuing diag fault reset from the
- * applications through IOCTL path to stop the execution of the
- * controller
+ * Issue PEL Wait MPI request through admin queue and return.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pel_wait_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_req_action_wait pel_wait;
+
+ mrioc->pel_abort_requested = false;
+
+ memset(&pel_wait, 0, sizeof(pel_wait));
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_pel_wait_complete;
+ drv_cmd->ioc_status = 0;
+ drv_cmd->ioc_loginfo = 0;
+ pel_wait.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+ pel_wait.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_wait.action = MPI3_PEL_ACTION_WAIT;
+ pel_wait.starting_sequence_number = cpu_to_le32(mrioc->pel_newest_seqnum);
+ pel_wait.locale = cpu_to_le16(mrioc->pel_locale);
+ pel_wait.class = cpu_to_le16(mrioc->pel_class);
+ pel_wait.wait_time = MPI3_PEL_WAITTIME_INFINITE_WAIT;
+ dprint_bsg_info(mrioc, "sending pel_wait seqnum(%d), class(%d), locale(0x%08x)\n",
+ mrioc->pel_newest_seqnum, mrioc->pel_class, mrioc->pel_locale);
+
+ if (mpi3mr_admin_request_post(mrioc, &pel_wait, sizeof(pel_wait), 0)) {
+ dprint_bsg_err(mrioc,
+ "Issuing PELWait: Admin post failed\n");
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ mrioc->pel_enabled = false;
+ }
+}
+
+/**
+ * mpi3mr_pel_get_seqnum_post - Issue PEL Get Sequence number
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * Issue PEL get sequence number MPI request through admin queue
+ * and return.
*
* Return: 0 on success, non-zero on failure.
*/
-int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
- u32 reset_reason)
+int mpi3mr_pel_get_seqnum_post(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
{
+ struct mpi3_pel_req_action_get_sequence_numbers pel_getseq_req;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
int retval = 0;
- ioc_info(mrioc, "Entry: reason code: %s\n",
- mpi3mr_reset_rc_name(reset_reason));
- mrioc->reset_in_progress = 1;
+ memset(&pel_getseq_req, 0, sizeof(pel_getseq_req));
+ mrioc->pel_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->pel_cmds.is_waiting = 0;
+ mrioc->pel_cmds.ioc_status = 0;
+ mrioc->pel_cmds.ioc_loginfo = 0;
+ mrioc->pel_cmds.callback = mpi3mr_pel_get_seqnum_complete;
+ pel_getseq_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);
+ pel_getseq_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
+ pel_getseq_req.action = MPI3_PEL_ACTION_GET_SEQNUM;
+ mpi3mr_add_sg_single(&pel_getseq_req.sgl, sgl_flags,
+ mrioc->pel_seqnum_sz, mrioc->pel_seqnum_dma);
+
+ retval = mpi3mr_admin_request_post(mrioc, &pel_getseq_req,
+ sizeof(pel_getseq_req), 0);
+ if (retval) {
+ if (drv_cmd) {
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+ }
+ mrioc->pel_enabled = false;
+ }
- mpi3mr_ioc_disable_intr(mrioc);
+ return retval;
+}
- retval = mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+/**
+ * mpi3mr_pel_wait_complete - PELWait Completion callback
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is a callback handler for the PELWait request; the
+ * firmware completes a PELWait request when it is aborted or
+ * when a new PEL entry is available. This sends an AEN to the
+ * application and, if the PELWait completion is not due to a
+ * PELAbort, this will send a request for a new PEL sequence
+ * number.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_reply *pel_reply = NULL;
+ u16 ioc_status, pe_log_status;
+ bool do_retry = false;
- if (retval) {
- ioc_err(mrioc, "The diag fault reset failed: reason %d\n",
- reset_reason);
- mpi3mr_ioc_enable_intr(mrioc);
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto cleanup_drv_cmd;
+
+ ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "%s: Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
+ __func__, ioc_status, drv_cmd->ioc_loginfo);
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
+ ioc_status, drv_cmd->ioc_loginfo);
+ do_retry = true;
}
- ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
- mrioc->reset_in_progress = 0;
- return retval;
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
+
+ if (!pel_reply) {
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed due to no reply\n");
+ goto out_failed;
+ }
+
+ pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
+ if ((pe_log_status != MPI3_PEL_STATUS_SUCCESS) &&
+ (pe_log_status != MPI3_PEL_STATUS_ABORTED)) {
+ ioc_err(mrioc, "%s: Failed pe_log_status(0x%04x)\n",
+ __func__, pe_log_status);
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed due to pel_log_status(0x%04x)\n",
+ pe_log_status);
+ do_retry = true;
+ }
+
+ if (do_retry) {
+ if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ dprint_bsg_err(mrioc, "pel_wait: retrying(%d)\n",
+ drv_cmd->retry_count);
+ mpi3mr_pel_wait_post(mrioc, drv_cmd);
+ return;
+ }
+ dprint_bsg_err(mrioc,
+ "pel_wait: failed after all retries(%d)\n",
+ drv_cmd->retry_count);
+ goto out_failed;
+ }
+ atomic64_inc(&event_counter);
+ if (!mrioc->pel_abort_requested) {
+ mrioc->pel_cmds.retry_count = 0;
+ mpi3mr_pel_get_seqnum_post(mrioc, &mrioc->pel_cmds);
+ }
+
+ return;
+out_failed:
+ mrioc->pel_enabled = false;
+cleanup_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
+}
+
+/**
+ * mpi3mr_pel_get_seqnum_complete - PELGetSeqNum Completion callback
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is a callback handler for the PEL get sequence number
+ * request; a new PEL wait request will be issued to the
+ * firmware from this handler.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_pel_get_seqnum_complete(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ struct mpi3_pel_reply *pel_reply = NULL;
+ struct mpi3_pel_seq *pel_seqnum_virt;
+ u16 ioc_status;
+ bool do_retry = false;
+
+ pel_seqnum_virt = (struct mpi3_pel_seq *)mrioc->pel_seqnum_virt;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto cleanup_drv_cmd;
+
+ ioc_status = drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed with ioc_status(0x%04x), log_info(0x%08x)\n",
+ ioc_status, drv_cmd->ioc_loginfo);
+ do_retry = true;
+ }
+
+ if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
+ pel_reply = (struct mpi3_pel_reply *)drv_cmd->reply;
+ if (!pel_reply) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed due to no reply\n");
+ goto out_failed;
+ }
+
+ if (le16_to_cpu(pel_reply->pe_log_status) != MPI3_PEL_STATUS_SUCCESS) {
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed due to pel_log_status(0x%04x)\n",
+ le16_to_cpu(pel_reply->pe_log_status));
+ do_retry = true;
+ }
+
+ if (do_retry) {
+ if (drv_cmd->retry_count < MPI3MR_PEL_RETRY_COUNT) {
+ drv_cmd->retry_count++;
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: retrying(%d)\n",
+ drv_cmd->retry_count);
+ mpi3mr_pel_get_seqnum_post(mrioc, drv_cmd);
+ return;
+ }
+
+ dprint_bsg_err(mrioc,
+ "pel_get_seqnum: failed after all retries(%d)\n",
+ drv_cmd->retry_count);
+ goto out_failed;
+ }
+ mrioc->pel_newest_seqnum = le32_to_cpu(pel_seqnum_virt->newest) + 1;
+ drv_cmd->retry_count = 0;
+ mpi3mr_pel_wait_post(mrioc, drv_cmd);
+
+ return;
+out_failed:
+ mrioc->pel_enabled = false;
+cleanup_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ drv_cmd->retry_count = 0;
}
/**
@@ -3828,7 +4788,7 @@ int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
 * This is a handler for recovering the controller by issuing a
 * soft reset or a diag fault reset. This is a blocking function;
 * while one reset is executing, any other requested resets will be
- * blocked. All IOCTLs/IO will be blocked during the reset. If
+ * blocked. All BSG requests will be blocked during the reset. If
 * the controller reset is successful then the controller will be
 * reinitialized, otherwise the controller will be marked as not
 * recoverable.
@@ -3847,34 +4807,46 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
unsigned long flags;
u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
- if (mrioc->fault_dbg) {
- if (snapdump)
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_kill_ioc(mrioc, reset_reason);
- }
-
+ /* Block the reset handler while a diag save is in progress */
+ dprint_reset(mrioc,
+ "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
+ mrioc->diagsave_timeout);
+ while (mrioc->diagsave_timeout)
+ ssleep(1);
/*
* Block new resets until the currently executing one is finished and
* return the status of the existing reset for all blocked resets
*/
+ dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
if (!mutex_trylock(&mrioc->reset_mutex)) {
- ioc_info(mrioc, "Another reset in progress\n");
- return -1;
+ ioc_info(mrioc,
+ "controller reset triggered by %s is blocked due to another reset in progress\n",
+ mpi3mr_reset_rc_name(reset_reason));
+ do {
+ ssleep(1);
+ } while (mrioc->reset_in_progress == 1);
+ ioc_info(mrioc,
+ "returning previous reset result(%d) for the reset triggered by %s\n",
+ mrioc->prev_reset_result,
+ mpi3mr_reset_rc_name(reset_reason));
+ return mrioc->prev_reset_result;
}
+ ioc_info(mrioc, "controller reset is triggered by %s\n",
+ mpi3mr_reset_rc_name(reset_reason));
+
+ mrioc->device_refresh_on = 0;
mrioc->reset_in_progress = 1;
+ mrioc->stop_bsgs = 1;
+ mrioc->prev_reset_result = -1;
if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
+ (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
(reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mrioc->event_masks[i] = -1;
- retval = mpi3mr_issue_event_notification(mrioc);
-
- if (retval) {
- ioc_err(mrioc,
- "Failed to turn off events prior to reset %d\n",
- retval);
- }
+ dprint_reset(mrioc, "soft_reset_handler: masking events\n");
+ mpi3mr_issue_event_notification(mrioc);
}
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
@@ -3903,28 +4875,51 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+ if (mrioc->num_io_throttle_group !=
+ mrioc->facts.max_io_throttle_group) {
+ ioc_err(mrioc,
+ "max io throttle group doesn't match old(%d), new(%d)\n",
+ mrioc->num_io_throttle_group,
+ mrioc->facts.max_io_throttle_group);
+ retval = -EPERM;
+ goto out;
+ }
- mpi3mr_flush_delayed_rmhs_list(mrioc);
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
mpi3mr_flush_drv_cmds(mrioc);
memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
- mpi3mr_cleanup_fwevt_list(mrioc);
+ memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
mpi3mr_flush_host_io(mrioc);
+ mpi3mr_cleanup_fwevt_list(mrioc);
mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
+
+ if (mrioc->prepare_for_reset) {
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
mpi3mr_memset_buffers(mrioc);
- retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
+ retval = mpi3mr_reinit_ioc(mrioc, 0);
if (retval) {
pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
mrioc->name, reset_reason);
goto out;
}
- ssleep(10);
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
out:
if (!retval) {
+ mrioc->diagsave_timeout = 0;
mrioc->reset_in_progress = 0;
- scsi_unblock_requests(mrioc->shost);
- mpi3mr_rfresh_tgtdevs(mrioc);
+ mrioc->pel_abort_requested = 0;
+ if (mrioc->pel_enabled) {
+ mrioc->pel_cmds.retry_count = 0;
+ mpi3mr_pel_wait_post(mrioc, &mrioc->pel_cmds);
+ }
+
+ mrioc->device_refresh_on = 0;
+
mrioc->ts_update_counter = 0;
spin_lock_irqsave(&mrioc->watchdog_lock, flags);
if (mrioc->watchdog_work_q)
@@ -3932,15 +4927,854 @@ out:
&mrioc->watchdog_work,
msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
+ mrioc->stop_bsgs = 0;
+ if (mrioc->pel_enabled)
+ atomic64_inc(&event_counter);
} else {
mpi3mr_issue_reset(mrioc,
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ mrioc->device_refresh_on = 0;
mrioc->unrecoverable = 1;
mrioc->reset_in_progress = 0;
retval = -1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
}
-
+ mrioc->prev_reset_result = retval;
mutex_unlock(&mrioc->reset_mutex);
- ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
+ ioc_info(mrioc, "controller reset is %s\n",
+ ((retval == 0) ? "successful" : "failed"));
return retval;
}
+
+
+/**
+ * mpi3mr_free_config_dma_memory - free memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: memory descriptor structure
+ *
+ * Check whether the size of the buffer specified by the memory
+ * descriptor is greater than the default page size; if so,
+ * free the memory pointed to by the descriptor.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_free_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if ((mem_desc->size > mrioc->cfg_page_sz) && mem_desc->addr) {
+ dma_free_coherent(&mrioc->pdev->dev, mem_desc->size,
+ mem_desc->addr, mem_desc->dma_addr);
+ mem_desc->addr = NULL;
+ }
+}
+
+/**
+ * mpi3mr_alloc_config_dma_memory - Alloc memory for config page
+ * @mrioc: Adapter instance reference
+ * @mem_desc: Memory descriptor to hold dma memory info
+ *
+ * This function allocates new DMA-able memory or provides the
+ * default config page DMA-able memory, based on the memory size
+ * described by the descriptor.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_alloc_config_dma_memory(struct mpi3mr_ioc *mrioc,
+ struct dma_memory_desc *mem_desc)
+{
+ if (mem_desc->size > mrioc->cfg_page_sz) {
+ mem_desc->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ mem_desc->size, &mem_desc->dma_addr, GFP_KERNEL);
+ if (!mem_desc->addr)
+ return -ENOMEM;
+ } else {
+ mem_desc->addr = mrioc->cfg_page;
+ mem_desc->dma_addr = mrioc->cfg_page_dma;
+ memset(mem_desc->addr, 0, mrioc->cfg_page_sz);
+ }
+ return 0;
+}
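A minimal usage sketch of the alloc/free pair above, mirroring how mpi3mr_process_cfg_req() below drives them; cfg_hdr here is an assumed, already-read page header:

	struct dma_memory_desc mem_desc;

	memset(&mem_desc, 0, sizeof(mem_desc));
	mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
	if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
		return -ENOMEM;
	/* ... build an SGE over mem_desc.dma_addr and post the request ... */
	mpi3mr_free_config_dma_memory(mrioc, &mem_desc);

When the requested size fits within the default config page, both helpers fall back to mrioc->cfg_page and the free becomes a no-op, so callers can pair them unconditionally.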
+
+/**
+ * mpi3mr_post_cfg_req - Issue config requests and wait
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting an MPI3 configuration request
+ * to the firmware. This blocks until the request completes, up
+ * to timeout seconds; if the request times out, this function
+ * faults the controller with the proper reason code.
+ *
+ * On successful completion of the request, this function returns
+ * the appropriate ioc_status from the firmware back to the caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req, int timeout, u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->cfg_cmds.mutex);
+ if (mrioc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending config request failed due to command in use\n");
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+ goto out;
+ }
+ mrioc->cfg_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->cfg_cmds.is_waiting = 1;
+ mrioc->cfg_cmds.callback = NULL;
+ mrioc->cfg_cmds.ioc_status = 0;
+ mrioc->cfg_cmds.ioc_loginfo = 0;
+
+ cfg_req->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_CFG_CMDS);
+ cfg_req->function = MPI3_FUNCTION_CONFIG;
+
+ init_completion(&mrioc->cfg_cmds.done);
+ dprint_cfg_info(mrioc, "posting config request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(cfg_req, sizeof(struct mpi3_config_request),
+ "mpi3_cfg_req");
+ retval = mpi3mr_admin_request_post(mrioc, cfg_req, sizeof(*cfg_req), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting config request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->cfg_cmds.done, (timeout * HZ));
+ if (!(mrioc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
+ ioc_err(mrioc, "config request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_cfg_err(mrioc,
+ "cfg_page request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->cfg_cmds.ioc_loginfo);
+
+out_unlock:
+ mrioc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->cfg_cmds.mutex);
+
+out:
+ return retval;
+}
+
+/**
+ * mpi3mr_process_cfg_req - config page request processor
+ * @mrioc: Adapter instance reference
+ * @cfg_req: Configuration request
+ * @cfg_hdr: Configuration page header
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ * @cfg_buf: Memory pointer to copy config page or header
+ * @cfg_buf_sz: Size of the memory to get config page or header
+ *
+ * This is the handler for config page read, write, and config
+ * page header read operations.
+ *
+ * This function expects the cfg_req to be populated with the
+ * page type, page number and action for the header read, and
+ * with the page address for all other operations.
+ *
+ * The cfg_hdr can be passed as null when reading the required
+ * header details; for read/write pages the cfg_hdr should point
+ * to a valid configuration page header.
+ *
+ * This allocates DMA-able memory based on the size of the config
+ * buffer and sets the SGE of the cfg_req.
+ *
+ * For write actions, the config page data has to be passed in
+ * the cfg_buf and the size of the data has to be mentioned in
+ * the cfg_buf_sz.
+ *
+ * For read/header actions, on successful completion of the
+ * request with a successful ioc_status, the data will be copied
+ * into the cfg_buf, limited to the minimum of the actual page
+ * size and cfg_buf_sz.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_process_cfg_req(struct mpi3mr_ioc *mrioc,
+ struct mpi3_config_request *cfg_req,
+ struct mpi3_config_page_header *cfg_hdr, int timeout, u16 *ioc_status,
+ void *cfg_buf, u32 cfg_buf_sz)
+{
+ struct dma_memory_desc mem_desc;
+ int retval = -1;
+ u8 invalid_action = 0;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ memset(&mem_desc, 0, sizeof(struct dma_memory_desc));
+
+ if (cfg_req->action == MPI3_CONFIG_ACTION_PAGE_HEADER)
+ mem_desc.size = sizeof(struct mpi3_config_page_header);
+ else {
+ if (!cfg_hdr) {
+ ioc_err(mrioc, "null config header passed for config action(%d), page_type(0x%02x), page_num(%d)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number);
+ goto out;
+ }
+ switch (cfg_hdr->page_attribute & MPI3_CONFIG_PAGEATTR_MASK) {
+ case MPI3_CONFIG_PAGEATTR_READ_ONLY:
+ if (cfg_req->action
+ != MPI3_CONFIG_ACTION_READ_CURRENT)
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_CHANGEABLE:
+ if ((cfg_req->action ==
+ MPI3_CONFIG_ACTION_READ_PERSISTENT) ||
+ (cfg_req->action ==
+ MPI3_CONFIG_ACTION_WRITE_PERSISTENT))
+ invalid_action = 1;
+ break;
+ case MPI3_CONFIG_PAGEATTR_PERSISTENT:
+ default:
+ break;
+ }
+ if (invalid_action) {
+ ioc_err(mrioc,
+ "config action(%d) is not allowed for page_type(0x%02x), page_num(%d) with page_attribute(0x%02x)\n",
+ cfg_req->action, cfg_req->page_type,
+ cfg_req->page_number, cfg_hdr->page_attribute);
+ goto out;
+ }
+ mem_desc.size = le16_to_cpu(cfg_hdr->page_length) * 4;
+ cfg_req->page_length = cfg_hdr->page_length;
+ cfg_req->page_version = cfg_hdr->page_version;
+ }
+ if (mpi3mr_alloc_config_dma_memory(mrioc, &mem_desc))
+ goto out;
+
+ mpi3mr_add_sg_single(&cfg_req->sgl, sgl_flags, mem_desc.size,
+ mem_desc.dma_addr);
+
+ if ((cfg_req->action == MPI3_CONFIG_ACTION_WRITE_PERSISTENT) ||
+ (cfg_req->action == MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(mem_desc.addr, cfg_buf, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer to be written\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+ if (mpi3mr_post_cfg_req(mrioc, cfg_req, timeout, ioc_status))
+ goto out;
+
+ retval = 0;
+ if ((*ioc_status == MPI3_IOCSTATUS_SUCCESS) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_PERSISTENT) &&
+ (cfg_req->action != MPI3_CONFIG_ACTION_WRITE_CURRENT)) {
+ memcpy(cfg_buf, mem_desc.addr, min_t(u16, mem_desc.size,
+ cfg_buf_sz));
+ dprint_cfg_info(mrioc, "config buffer read\n");
+ if (mrioc->logging_level & MPI3_DEBUG_CFG_INFO)
+ dprint_dump(mem_desc.addr, mem_desc.size, "cfg_buf");
+ }
+
+out:
+ mpi3mr_free_config_dma_memory(mrioc, &mem_desc);
+ return retval;
+}
+
+/**
+ * mpi3mr_cfg_get_dev_pg0 - Read current device page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @dev_pg0: Pointer to return device page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for the config page read of a specific
+ * device page0. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_dev_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_device_page0 *dev_pg0, u16 pg_sz, u32 form, u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(dev_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DEVICE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "device page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_DEVICE_PGAD_FORM_MASK) |
+ (form_spec & MPI3_DEVICE_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, dev_pg0, pg_sz)) {
+ ioc_err(mrioc, "device page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
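A hedged caller sketch for the accessor above (not part of this patch); dev_handle and the MPI3_DEVICE_PGAD_FORM_HANDLE form value are assumed from the caller's context and the MPI3 headers:

	struct mpi3_device_page0 dev_pg0;
	u16 ioc_status = 0;

	if (mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
	    sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, dev_handle) ||
	    (ioc_status != MPI3_IOCSTATUS_SUCCESS)) {
		ioc_err(mrioc, "device page0 read failed for handle(0x%04x)\n",
		    dev_handle);
		return;
	}
	/* dev_pg0 is valid here; note ioc_status was checked by the caller */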
+
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg0 - Read current SAS Phy page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg0: Pointer to return SAS Phy page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for the config page read of a specific
+ * SAS Phy page0. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page0 *phy_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas phy page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_phy_pg1 - Read current SAS Phy page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @phy_pg1: Pointer to return SAS Phy page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for the config page read of a specific
+ * SAS Phy page1. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_phy_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_phy_page1 *phy_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(phy_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_PHY;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas phy page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas phy page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_PHY_PGAD_FORM_MASK) |
+ (form_spec & MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, phy_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas phy page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg0 - Read current SAS Expander page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg0: Pointer to return SAS Expander page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for the config page read of a specific SAS
+ * Expander page0. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page0 *exp_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg0, pg_sz)) {
+ ioc_err(mrioc, "expander page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_exp_pg1 - Read current SAS Expander page1
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @exp_pg1: Pointer to return SAS Expander page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like phy number
+ *
+ * This is the handler for the config page read of a specific SAS
+ * Expander page1. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_exp_pg1(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_sas_expander_page1 *exp_pg1, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(exp_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_EXPANDER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "expander page1 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "expander page1 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_SAS_EXPAND_PGAD_FORM_MASK) |
+ (form_spec & (MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK |
+ MPI3_SAS_EXPAND_PGAD_HANDLE_MASK)));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, exp_pg1, pg_sz)) {
+ ioc_err(mrioc, "expander page1 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_enclosure_pg0 - Read current Enclosure page0
+ * @mrioc: Adapter instance reference
+ * @ioc_status: Pointer to return ioc status
+ * @encl_pg0: Pointer to return Enclosure page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ * @form: The form to be used for addressing the page
+ * @form_spec: Form specific information like device handle
+ *
+ * This is the handler for the config page read of a specific
+ * Enclosure page0. The ioc_status holds the controller-returned
+ * ioc_status. This routine doesn't check ioc_status to decide
+ * whether the page read succeeded or not; that is the caller's
+ * responsibility.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_enclosure_pg0(struct mpi3mr_ioc *mrioc, u16 *ioc_status,
+ struct mpi3_enclosure_page0 *encl_pg0, u16 pg_sz, u32 form,
+ u32 form_spec)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u32 page_address;
+
+ memset(encl_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_ENCLOSURE;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "enclosure page0 header read failed\n");
+ goto out_failed;
+ }
+ if (*ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "enclosure page0 header read failed with ioc_status(0x%04x)\n",
+ *ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ page_address = ((form & MPI3_ENCLOS_PGAD_FORM_MASK) |
+ (form_spec & MPI3_ENCLOS_PGAD_HANDLE_MASK));
+ cfg_req.page_address = cpu_to_le32(page_address);
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, ioc_status, encl_pg0, pg_sz)) {
+ ioc_err(mrioc, "enclosure page0 read failed\n");
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg0 - Read current SASIOUnit page0
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg0: Pointer to return SAS IO Unit page 0
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for the config page read of the SAS IO
+ * Unit page0. This routine checks ioc_status to decide whether
+ * the page read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg0(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg0, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 0;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page0 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg0, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page0 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page0 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_get_sas_io_unit_pg1 - Read current SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to return SAS IO Unit page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for the config page read of the SAS IO
+ * Unit page1. This routine checks ioc_status to decide whether
+ * the page read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(sas_io_unit_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
+
+/**
+ * mpi3mr_cfg_set_sas_io_unit_pg1 - Write SASIOUnit page1
+ * @mrioc: Adapter instance reference
+ * @sas_io_unit_pg1: Pointer to the SAS IO Unit page 1 to write
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for the config page write of the SAS IO
+ * Unit page1. This routine checks ioc_status to decide whether
+ * the page write succeeded or not. This will modify both the
+ * current and the persistent page.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_set_sas_io_unit_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_SAS_IO_UNIT;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "sas io unit page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write current failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write current failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+
+ cfg_req.action = MPI3_CONFIG_ACTION_WRITE_PERSISTENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, sas_io_unit_pg1, pg_sz)) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "sas io unit page1 write persistent failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
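A hedged read-modify-write sketch pairing the two SAS IO Unit page1 accessors above; the commented field tweak is illustrative only, and the sas_io_unit_pg1/pg_sz buffer is assumed to be allocated by the caller:

	if (!mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, pg_sz)) {
		/* e.g. adjust per-phy settings in the page here */
		if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1,
		    pg_sz))
			ioc_err(mrioc, "sas io unit page1 update failed\n");
	}

Since the setter writes both the current and the persistent copies, this keeps the two in sync after a modification.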
+
+/**
+ * mpi3mr_cfg_get_driver_pg1 - Read current Driver page1
+ * @mrioc: Adapter instance reference
+ * @driver_pg1: Pointer to return Driver page 1
+ * @pg_sz: Size of the memory allocated to the page pointer
+ *
+ * This is the handler for the config page read of the Driver
+ * page1. This routine checks ioc_status to decide whether the
+ * page read succeeded or not.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_ioc *mrioc,
+ struct mpi3_driver_page1 *driver_pg1, u16 pg_sz)
+{
+ struct mpi3_config_page_header cfg_hdr;
+ struct mpi3_config_request cfg_req;
+ u16 ioc_status = 0;
+
+ memset(driver_pg1, 0, pg_sz);
+ memset(&cfg_hdr, 0, sizeof(cfg_hdr));
+ memset(&cfg_req, 0, sizeof(cfg_req));
+
+ cfg_req.function = MPI3_FUNCTION_CONFIG;
+ cfg_req.action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ cfg_req.page_type = MPI3_CONFIG_PAGETYPE_DRIVER;
+ cfg_req.page_number = 1;
+ cfg_req.page_address = 0;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, NULL,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, &cfg_hdr, sizeof(cfg_hdr))) {
+ ioc_err(mrioc, "driver page1 header read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 header read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ cfg_req.action = MPI3_CONFIG_ACTION_READ_CURRENT;
+
+ if (mpi3mr_process_cfg_req(mrioc, &cfg_req, &cfg_hdr,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status, driver_pg1, pg_sz)) {
+ ioc_err(mrioc, "driver page1 read failed\n");
+ goto out_failed;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "driver page1 read failed with ioc_status(0x%04x)\n",
+ ioc_status);
+ goto out_failed;
+ }
+ return 0;
+out_failed:
+ return -1;
+}
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index fe10f257b5a4..f77ee4051b00 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -2,7 +2,7 @@
/*
* Driver for Broadcom MPI3 Storage Controllers
*
- * Copyright (C) 2017-2021 Broadcom Inc.
+ * Copyright (C) 2017-2022 Broadcom Inc.
* (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
*
*/
@@ -14,6 +14,7 @@ LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static int mrioc_ids;
static int warn_non_secure_ctlr;
+atomic64_t event_counter;
MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
@@ -34,6 +35,13 @@ MODULE_PARM_DESC(logging_level,
" bits for enabling additional logging info (default=0)");
/* Forward declarations*/
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
+
+#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF)
+
+#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE)
+
/**
* mpi3mr_host_tag_for_scmd - Get host tag for a scmd
* @mrioc: Adapter instance reference
@@ -283,6 +291,35 @@ static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
}
/**
+ * mpi3mr_cancel_work - cancel firmware event
+ * @fwevt: fwevt object which needs to be canceled
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
+{
+ /*
+ * Wait on the fwevt to complete. If this returns 1, then
+ * the event was never executed.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from mpi3mr_process_fwevt()
+ */
+ if (cancel_work_sync(&fwevt->work)) {
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ mpi3mr_fwevt_put(fwevt);
+ /*
+ * Put fwevt reference count to neutralize
+ * kref_init increment
+ */
+ mpi3mr_fwevt_put(fwevt);
+ }
+}
+
+/**
* mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
* @mrioc: Adapter instance reference
*
@@ -299,29 +336,70 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
!mrioc->fwevt_worker_thread)
return;
- while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)) ||
- (fwevt = mrioc->current_event)) {
+ while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
+ mpi3mr_cancel_work(fwevt);
+
+ if (mrioc->current_event) {
+ fwevt = mrioc->current_event;
/*
- * Wait on the fwevt to complete. If this returns 1, then
- * the event was never executed, and we need a put for the
- * reference the work had on the fwevt.
- *
- * If it did execute, we wait for it to finish, and the put will
- * happen from mpi3mr_process_fwevt()
+ * Don't call the cancel_work_sync() API for the
+ * fwevt work if the controller reset gets
+ * called as part of processing the
+ * same fwevt work, or when the worker thread is
+ * waiting for device add/remove APIs to complete.
+ * Otherwise we will deadlock.
*/
- if (cancel_work_sync(&fwevt->work)) {
- /*
- * Put fwevt reference count after
- * dequeuing it from worker queue
- */
- mpi3mr_fwevt_put(fwevt);
- /*
- * Put fwevt reference count to neutralize
- * kref_init increment
- */
- mpi3mr_fwevt_put(fwevt);
+ if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
+ fwevt->discard = 1;
+ return;
}
+
+ mpi3mr_cancel_work(fwevt);
+ }
+}
+
+/**
+ * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to queue a synthetically generated driver event to
+ * the event worker thread; the driver event will be used to
+ * reduce the QD of all VDs in the TG from the worker thread.
+ *
+ * Return: None.
+ */
+static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ struct mpi3mr_fwevt *fwevt;
+ u16 sz = sizeof(struct mpi3mr_throttle_group_info *);
+
+ /*
+ * If the QD reduction event is already queued due to throttling
+ * and the QD has not been restored through a device info change
+ * event, then don't queue further reduction events.
+ */
+ if (tg->fw_qd != tg->modified_qd)
+ return;
+
+ fwevt = mpi3mr_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
+ return;
}
+ *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = sz;
+ tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);
+
+ dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
+ tg->id);
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
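A worked example of the queue-depth formula above; the numbers are illustrative:

	/* fw_qd = 64, qd_reduction = 3 (tenths of the firmware QD) */
	u16 modified_qd = max_t(u16, (64 * 3) / 10, 8);	/* = 19 */

so the VDs in the throttle group drop to roughly 30% of the firmware queue depth, with 8 as the floor.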
/**
@@ -343,6 +421,11 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
if (tgtdev->starget && tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ tgt_priv->io_throttle_enabled = 0;
+ tgt_priv->io_divert = 0;
+ tgt_priv->throttle_group = NULL;
+ if (tgtdev->host_exposed)
+ atomic_set(&tgt_priv->block_io, 1);
}
}
}
@@ -351,14 +434,12 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
* mpi3mr_print_scmd - print individual SCSI command
* @rq: Block request
* @data: Adapter instance reference
- * @reserved: N/A. Currently not used
*
* Print the SCSI command details if it is in LLD scope.
*
* Return: true always.
*/
-static bool mpi3mr_print_scmd(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -382,7 +463,6 @@ out:
* mpi3mr_flush_scmd - Flush individual SCSI command
* @rq: Block request
* @data: Adapter instance reference
- * @reserved: N/A. Currently not used
*
* Return the SCSI command to the upper layers if it is in LLD
* scope.
@@ -390,8 +470,7 @@ out:
* Return: true always.
*/
-static bool mpi3mr_flush_scmd(struct request *rq,
- void *data, bool reserved)
+static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
@@ -418,6 +497,70 @@ out:
}
/**
+ * mpi3mr_count_dev_pending - Count commands pending for a lun
+ * @rq: Block request
+ * @data: SCSI device reference
+ *
+ * This is an iterator function called for each SCSI command in
+ * a host; if the command is pending in the LLD for the specific
+ * device (LUN), then the device-specific pending I/O counter in
+ * the device structure is updated.
+ *
+ * Return: true always.
+ */
+
+static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
+{
+ struct scsi_device *sdev = (struct scsi_device *)data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device == sdev)
+ sdev_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
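A hedged sketch of how this iterator is meant to be driven through blk_mq_tagset_busy_iter(); the surrounding reset-handler context (scmd and its device) is assumed:

	struct mpi3mr_sdev_priv_data *sdev_priv_data =
	    scmd->device->hostdata;

	sdev_priv_data->pend_count = 0;
	blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
	    mpi3mr_count_dev_pending, (void *)scmd->device);
	ioc_info(mrioc, "%d I/Os pending on the device\n",
	    sdev_priv_data->pend_count);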
+
+/**
+ * mpi3mr_count_tgt_pending - Count commands pending for target
+ * @rq: Block request
+ * @data: SCSI target reference
+ *
+ * This is an iterator function called for each SCSI command in
+ * a host; if the command is pending in the LLD for the specific
+ * target, then the target-specific pending I/O counter in the
+ * target structure is updated.
+ *
+ * Return: true always.
+ */
+
+static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
+{
+ struct scsi_target *starget = (struct scsi_target *)data;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device && (scsi_target(scmd->device) == starget))
+ stgt_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
+
+/**
* mpi3mr_flush_host_io - Flush host I/Os
* @mrioc: Adapter instance reference
*
@@ -440,6 +583,39 @@ void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
}
/**
+ * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
+ * @mrioc: Adapter instance reference
+ *
+ * This function waits for currently running I/O poll threads to
+ * exit and then flushes all host I/Os and any internal pending
+ * cmds. This is executed after the controller is marked as
+ * unrecoverable.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
+{
+ struct Scsi_Host *shost = mrioc->shost;
+ int i;
+
+ if (!mrioc->unrecoverable)
+ return;
+
+ if (mrioc->op_reply_qinfo) {
+ for (i = 0; i < mrioc->num_queues; i++) {
+ while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
+ udelay(500);
+ atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
+ }
+ }
+ mrioc->flush_io_count = 0;
+ blk_mq_tagset_busy_iter(&shost->tag_set,
+ mpi3mr_flush_scmd, (void *)mrioc);
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
+ mpi3mr_flush_drv_cmds(mrioc);
+}
+
+/**
* mpi3mr_alloc_tgtdev - target device allocator
*
* Allocate target device instance and initialize the reference
@@ -537,7 +713,7 @@ found_tgtdev:
*
* Return: Target device reference.
*/
-static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
+struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
struct mpi3mr_ioc *mrioc, u16 handle)
{
struct mpi3mr_tgt_dev *tgtdev;
@@ -620,6 +796,53 @@ static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
}
/**
+ * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ * @divert_value: 1 or 0
+ *
+ * Accessor to set the io_divert flag to the given value for
+ * each device associated with the given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg, u8 divert_value)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg)
+ tgt_priv->io_divert = divert_value;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
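+
+/*
+ * A minimal sketch of how the accessor above is used from the
+ * reply path in mpi3mr_process_op_reply_desc() below, once a
+ * throttle group has drained back to its low watermark:
+ *
+ *	if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
+ *		tg->io_divert = 0;
+ *		mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, tg, 0);
+ *	}
+ */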
+
+/**
+ * mpi3mr_print_device_event_notice - print notice related to post processing of
+ * device event after controller reset.
+ *
+ * @mrioc: Adapter instance reference
+ * @device_add: true for device add event and false for device removal event
+ *
+ * Return: None.
+ */
+void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
+ bool device_add)
+{
+ ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
+ (device_add ? "addition" : "removal"));
+ ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
+ ioc_notice(mrioc, "are matched with attached devices for correctness\n");
+}
+
+/**
* mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
* @mrioc: Adapter instance reference
* @tgtdev: Target device structure
@@ -630,7 +853,7 @@ static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
*
 * Return: Nothing.
*/
-static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
+void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
struct mpi3mr_tgt_dev *tgtdev)
{
struct mpi3mr_stgt_priv_data *tgt_priv;
@@ -639,13 +862,29 @@ static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
__func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
if (tgtdev->starget && tgtdev->starget->hostdata) {
tgt_priv = tgtdev->starget->hostdata;
+ atomic_set(&tgt_priv->block_io, 0);
tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
}
- if (tgtdev->starget) {
- scsi_remove_target(&tgtdev->starget->dev);
- tgtdev->host_exposed = 0;
- }
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+ MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
+ if (tgtdev->starget) {
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+ scsi_remove_target(&tgtdev->starget->dev);
+ tgtdev->host_exposed = 0;
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc,
+ false);
+ return;
+ }
+ }
+ }
+ } else
+ mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);
+
ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
__func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}
@@ -667,23 +906,37 @@ static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
int retval = 0;
struct mpi3mr_tgt_dev *tgtdev;
+ if (mrioc->reset_in_progress)
+ return -1;
+
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (!tgtdev) {
retval = -1;
goto out;
}
- if (tgtdev->is_hidden) {
+ if (tgtdev->is_hidden || tgtdev->host_exposed) {
retval = -1;
goto out;
}
- if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
+ if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
+	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
tgtdev->host_exposed = 1;
- scsi_scan_target(&mrioc->shost->shost_gendev, 0,
- tgtdev->perst_id,
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+ scsi_scan_target(&mrioc->shost->shost_gendev,
+ mrioc->scsi_device_channel, tgtdev->perst_id,
SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
if (!tgtdev->starget)
tgtdev->host_exposed = 0;
- }
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard) {
+ mpi3mr_print_device_event_notice(mrioc, true);
+ goto out;
+ }
+ }
+	} else {
+		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
+	}
out:
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
@@ -714,6 +967,7 @@ static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
else if (!q_depth)
q_depth = MPI3MR_DEFAULT_SDEV_QD;
retval = scsi_change_queue_depth(sdev, q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
return retval;
}
@@ -742,11 +996,18 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
switch (tgtdev->dev_type) {
case MPI3_DEVICE_DEVFORM_PCIE:
/*The block layer hw sector size = 512*/
- blk_queue_max_hw_sectors(sdev->request_queue,
- tgtdev->dev_spec.pcie_inf.mdts / 512);
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
-
+ if ((tgtdev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgtdev->dev_spec.pcie_inf.mdts / 512);
+ if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
+ }
break;
default:
break;
@@ -770,9 +1031,11 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
- if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
- tgtdev->host_exposed) {
- mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
+ dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
+ tgtdev->perst_id);
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
mpi3mr_tgtdev_put(tgtdev);
}
@@ -791,6 +1054,7 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
* @mrioc: Adapter instance reference
* @tgtdev: Target device internal structure
* @dev_pg0: New device page0
+ * @is_added: Flag to indicate the device is just added
*
* Update the information from the device page0 into the driver
* cached target device structure.
@@ -798,30 +1062,62 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
* Return: Nothing.
*/
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
+ struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
+ bool is_added)
{
u16 flags = 0;
- struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
+ struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
u8 prot_mask = 0;
tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
tgtdev->dev_type = dev_pg0->device_form;
+ tgtdev->io_unit_port = dev_pg0->io_unit_port;
tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
tgtdev->slot = le16_to_cpu(dev_pg0->slot);
tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
+ tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);
+
+ if (tgtdev->encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ tgtdev->encl_handle);
+ if (enclosure_dev)
+ tgtdev->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+
+ flags = tgtdev->devpg0_flag;
- flags = le16_to_cpu(dev_pg0->flags);
tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
+	if (is_added)
+ tgtdev->io_throttle_enabled =
+ (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+
if (tgtdev->starget && tgtdev->starget->hostdata) {
scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
tgtdev->starget->hostdata;
scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgtdev->io_throttle_enabled;
+		if (is_added)
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ }
+
+ switch (dev_pg0->access_status) {
+ case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI3_DEVICE0_ASTATUS_PREPARE:
+ case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
+ case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
+ break;
+ default:
+ tgtdev->is_hidden = 1;
+ break;
}
switch (tgtdev->dev_type) {
@@ -834,12 +1130,25 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
tgtdev->dev_spec.sas_sata_inf.sas_address =
le64_to_cpu(sasinf->sas_address);
+ tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
+ tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
+ sasinf->attached_phy_identifier;
if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
tgtdev->is_hidden = 1;
else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
tgtdev->is_hidden = 1;
+
+ if (((tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
+ && (tgtdev->devpg0_flag &
+ MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
+ (tgtdev->parent_handle == 0xFFFF))
+ tgtdev->non_stl = 1;
+ if (tgtdev->dev_spec.sas_sata_inf.hba_port)
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
+ dev_pg0->io_unit_port;
break;
}
case MPI3_DEVICE_DEVFORM_PCIE:
@@ -848,6 +1157,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
&dev_pg0->device_specific.pcie_format;
u16 dev_info = le16_to_cpu(pcieinf->device_info);
+ tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
tgtdev->dev_spec.pcie_inf.capb =
le32_to_cpu(pcieinf->capabilities);
tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
@@ -858,15 +1168,20 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
le32_to_cpu(pcieinf->maximum_data_transfer_size);
tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
tgtdev->dev_spec.pcie_inf.reset_to =
- pcieinf->controller_reset_to;
+ max_t(u8, pcieinf->controller_reset_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
tgtdev->dev_spec.pcie_inf.abort_to =
- pcieinf->nv_me_abort_to;
+ max_t(u8, pcieinf->nvme_abort_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
}
if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
- if ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
- MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
+ if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
+ ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
if (!mrioc->shost)
break;
prot_mask = scsi_host_get_prot(mrioc->shost);
@@ -883,10 +1198,33 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
{
struct mpi3_device0_vd_format *vdinf =
&dev_pg0->device_specific.vd_format;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u16 vdinf_io_throttle_group =
+ le16_to_cpu(vdinf->io_throttle_group);
- tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
+ tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
+ tgtdev->non_stl = 1;
+ tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
+ tgtdev->dev_spec.vd_inf.tg_high =
+ le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
+ tgtdev->dev_spec.vd_inf.tg_low =
+ le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
+ if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
+ tg = mrioc->throttle_groups + vdinf_io_throttle_group;
+ tg->id = vdinf_io_throttle_group;
+ tg->high = tgtdev->dev_spec.vd_inf.tg_high;
+ tg->low = tgtdev->dev_spec.vd_inf.tg_low;
+ tg->qd_reduction =
+ tgtdev->dev_spec.vd_inf.tg_qd_reduction;
+			if (is_added)
+ tg->fw_qd = tgtdev->q_depth;
+ tg->modified_qd = tgtdev->q_depth;
+ }
+ tgtdev->dev_spec.vd_inf.tg = tg;
+ if (scsi_tgt_priv_data)
+ scsi_tgt_priv_data->throttle_group = tg;
break;
}
default:
@@ -983,7 +1321,7 @@ static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
if (!tgtdev)
goto out;
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
if (!tgtdev->is_hidden && !tgtdev->host_exposed)
mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
if (tgtdev->is_hidden && tgtdev->host_exposed)
@@ -997,6 +1335,135 @@ out:
}
/**
+ * mpi3mr_free_enclosure_list - release enclosures
+ * @mrioc: Adapter instance reference
+ *
+ * Free memory allocated during enclosure add.
+ *
+ * Return: Nothing.
+ */
+void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;
+
+ list_for_each_entry_safe(enclosure_dev,
+ enclosure_dev_next, &mrioc->enclosure_list, list) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+ }
+}
+
+/**
+ * mpi3mr_enclosure_find_by_handle - enclosure search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the enclosure
+ *
+ * This searches for an enclosure device based on its handle and
+ * returns the enclosure object.
+ *
+ * Return: Enclosure object reference or NULL
+ */
+struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
+ struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;
+
+ list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
+ if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
+ continue;
+ r = enclosure_dev;
+ goto out;
+ }
+out:
+ return r;
+}
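+
+/*
+ * A typical lookup, as done from mpi3mr_update_tgtdev() above
+ * when device page0 carries a valid enclosure handle:
+ *
+ *	enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ *	    tgtdev->encl_handle);
+ *	if (enclosure_dev)
+ *		tgtdev->enclosure_logical_id = le64_to_cpu(
+ *		    enclosure_dev->pg0.enclosure_logical_id);
+ */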
+
+/**
+ * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
+ * @mrioc: Adapter instance reference
+ * @encl_pg0: Enclosure page 0.
+ * @is_added: Added event or not
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
+ struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
+{
+ char *reason_str = NULL;
+
+ if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
+ return;
+
+ if (is_added)
+ reason_str = "enclosure added";
+ else
+ reason_str = "enclosure dev status changed";
+
+ ioc_info(mrioc,
+ "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
+ reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
+ (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
+ ioc_info(mrioc,
+ "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
+ le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
+ le16_to_cpu(encl_pg0->flags),
+ ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
+}
+
+/**
+ * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Prints information about the enclosure device status or
+ * enclosure add event if logging is enabled, and adds or
+ * removes the enclosure from the controller's internal list of
+ * enclosures.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ struct mpi3mr_enclosure_node *enclosure_dev = NULL;
+ struct mpi3_enclosure_page0 *encl_pg0;
+ u16 encl_handle;
+ u8 added, present;
+
+ encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
+ added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
+ mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);
+
+ encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
+ present = ((le16_to_cpu(encl_pg0->flags) &
+ MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);
+
+ if (encl_handle)
+ enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
+ encl_handle);
+ if (!enclosure_dev && present) {
+ enclosure_dev =
+ kzalloc(sizeof(struct mpi3mr_enclosure_node),
+ GFP_KERNEL);
+ if (!enclosure_dev)
+ return;
+ list_add_tail(&enclosure_dev->list,
+ &mrioc->enclosure_list);
+ }
+ if (enclosure_dev) {
+ if (!present) {
+ list_del(&enclosure_dev->list);
+ kfree(enclosure_dev);
+		} else {
+			memcpy(&enclosure_dev->pg0, encl_pg0,
+			    sizeof(enclosure_dev->pg0));
+		}
+	}
+}
+
+/**
* mpi3mr_sastopochg_evt_debug - SASTopoChange details
* @mrioc: Adapter instance reference
* @event_data: SAS topology change list event data
@@ -1035,8 +1502,9 @@ mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "%s :sas topology change: (%s)\n",
__func__, status_str);
ioc_info(mrioc,
- "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
+ "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
__func__, le16_to_cpu(event_data->expander_dev_handle),
+ event_data->io_unit_port,
le16_to_cpu(event_data->enclosure_handle),
event_data->start_phy_num, event_data->num_entries);
for (i = 0; i < event_data->num_entries; i++) {
@@ -1094,11 +1562,34 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
int i;
u16 handle;
u8 reason_code;
+ u64 exp_sas_address = 0, parent_sas_address = 0;
+ struct mpi3mr_hba_port *hba_port = NULL;
struct mpi3mr_tgt_dev *tgtdev = NULL;
+ struct mpi3mr_sas_node *sas_expander = NULL;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate, parent_phy_number;
mpi3mr_sastopochg_evt_debug(mrioc, event_data);
+ if (mrioc->sas_transport_enabled) {
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc,
+ event_data->io_unit_port);
+ if (le16_to_cpu(event_data->expander_dev_handle)) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
+ le16_to_cpu(event_data->expander_dev_handle));
+ if (sas_expander) {
+ exp_sas_address = sas_expander->sas_address;
+ hba_port = sas_expander->hba_port;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ parent_sas_address = exp_sas_address;
+ } else
+ parent_sas_address = mrioc->sas_hba.sas_address;
+ }
for (i = 0; i < event_data->num_entries; i++) {
+ if (fwevt->discard)
+ return;
handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
if (!handle)
continue;
@@ -1116,12 +1607,37 @@ static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
mpi3mr_tgtdev_put(tgtdev);
break;
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
+ case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
+ {
+ if (!mrioc->sas_transport_enabled || tgtdev->non_stl
+ || tgtdev->is_hidden)
+ break;
+ link_rate = event_data->phy_entry[i].link_rate >> 4;
+ prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
+ if (link_rate == prev_link_rate)
+ break;
+ if (!parent_sas_address)
+ break;
+ parent_phy_number = event_data->start_phy_num + i;
+ mpi3mr_update_links(mrioc, parent_sas_address, handle,
+ parent_phy_number, link_rate, hba_port);
+ break;
+ }
default:
break;
}
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
}
+
+ if (mrioc->sas_transport_enabled && (event_data->exp_status ==
+ MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
+ if (sas_expander)
+ mpi3mr_expander_remove(mrioc, exp_sas_address,
+ hba_port);
+ }
}
/**
@@ -1230,6 +1746,8 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_pcietopochg_evt_debug(mrioc, event_data);
for (i = 0; i < event_data->num_entries; i++) {
+ if (fwevt->discard)
+ return;
handle =
le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
if (!handle)
@@ -1256,6 +1774,77 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_logdata_evt_bh - Log data event bottomhalf
+ * @mrioc: Adapter instance reference
+ * @fwevt: Firmware event reference
+ *
+ * Extracts the event data and calls the application-interfacing
+ * function to process the event further.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_fwevt *fwevt)
+{
+ mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
+ fwevt->event_data_size);
+}
+
+/**
+ * mpi3mr_update_sdev_qd - Update SCSI device queue depth
+ * @sdev: SCSI device reference
+ * @data: Queue depth reference
+ *
+ * This is an iterator function called for each SCSI device in a
+ * target to update the QD of each SCSI device.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
+{
+ u16 *q_depth = (u16 *)data;
+
+ scsi_change_queue_depth(sdev, (int)*q_depth);
+ sdev->max_queue_depth = sdev->queue_depth;
+}
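+
+/*
+ * The per-device updater above is applied to every LUN of a
+ * target through starget_for_each_device(), as in
+ * mpi3mr_set_qd_for_all_vd_in_tg() below:
+ *
+ *	starget_for_each_device(tgtdev->starget,
+ *	    (void *)&tg->modified_qd, mpi3mr_update_sdev_qd);
+ */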
+
+/**
+ * mpi3mr_set_qd_for_all_vd_in_tg - set QD for TG VDs
+ * @mrioc: Adapter instance reference
+ * @tg: Throttle group information pointer
+ *
+ * Accessor to reduce QD for each device associated with the
+ * given throttle group.
+ *
+ * Return: None.
+ */
+static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_throttle_group_info *tg)
+{
+ unsigned long flags;
+ struct mpi3mr_tgt_dev *tgtdev;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ if (tgtdev->starget && tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ if (tgt_priv->throttle_group == tg) {
+ dprint_event_bh(mrioc,
+ "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
+ tgt_priv->perst_id, tgtdev->q_depth,
+ tg->modified_qd);
+ starget_for_each_device(tgtdev->starget,
+ (void *)&tg->modified_qd,
+ mpi3mr_update_sdev_qd);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+}
+
+/**
* mpi3mr_fwevt_bh - Firmware event bottomhalf handler
* @mrioc: Adapter instance reference
* @fwevt: Firmware event reference
@@ -1268,28 +1857,54 @@ static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
struct mpi3mr_fwevt *fwevt)
{
- mrioc->current_event = fwevt;
+ struct mpi3_device_page0 *dev_pg0 = NULL;
+ u16 perst_id, handle, dev_info;
+ struct mpi3_device0_sas_sata_format *sasinf = NULL;
+
mpi3mr_fwevt_del_from_list(mrioc, fwevt);
+ mrioc->current_event = fwevt;
if (mrioc->stop_drv_processing)
goto out;
+ if (mrioc->unrecoverable) {
+ dprint_event_bh(mrioc,
+ "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
+ fwevt->event_id);
+ goto out;
+ }
+
if (!fwevt->process_evt)
goto evt_ack;
switch (fwevt->event_id) {
case MPI3_EVENT_DEVICE_ADDED:
{
- struct mpi3_device_page0 *dev_pg0 =
- (struct mpi3_device_page0 *)fwevt->event_data;
- mpi3mr_report_tgtdev_to_host(mrioc,
- le16_to_cpu(dev_pg0->persistent_id));
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ handle = le16_to_cpu(dev_pg0->dev_handle);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
+ else if (mrioc->sas_transport_enabled &&
+ (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
+ sasinf = &dev_pg0->device_specific.sas_sata_format;
+ dev_info = le16_to_cpu(sasinf->device_info);
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_is_expander_device(dev_info))
+ mpi3mr_expander_add(mrioc, handle);
+ }
break;
}
case MPI3_EVENT_DEVICE_INFO_CHANGED:
{
- mpi3mr_devinfochg_evt_bh(mrioc,
- (struct mpi3_device_page0 *)fwevt->event_data);
+ dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
+ perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
+ mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
break;
}
case MPI3_EVENT_DEVICE_STATUS_CHANGE:
@@ -1297,6 +1912,13 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
break;
}
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ {
+ mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
+ break;
+ }
+
case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
{
mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
@@ -1307,13 +1929,48 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
break;
}
+ case MPI3_EVENT_LOG_DATA:
+ {
+ mpi3mr_logdata_evt_bh(mrioc, fwevt);
+ break;
+ }
+ case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
+ {
+ struct mpi3mr_throttle_group_info *tg;
+
+ tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
+ dprint_event_bh(mrioc,
+ "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
+ tg->id, tg->need_qd_reduction);
+ if (tg->need_qd_reduction) {
+ mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
+ tg->need_qd_reduction = 0;
+ }
+ break;
+ }
+ case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
+ {
+ while (mrioc->device_refresh_on)
+ msleep(500);
+
+ dprint_event_bh(mrioc,
+ "scan for non responding and newly added devices after soft reset started\n");
+ if (mrioc->sas_transport_enabled) {
+ mpi3mr_refresh_sas_ports(mrioc);
+ mpi3mr_refresh_expanders(mrioc);
+ }
+ mpi3mr_rfresh_tgtdevs(mrioc);
+ ioc_info(mrioc,
+ "scan for non responding and newly added devices after soft reset completed\n");
+ break;
+ }
default:
break;
}
evt_ack:
if (fwevt->send_ack)
- mpi3mr_send_event_ack(mrioc, fwevt->event_id,
+ mpi3mr_process_event_ack(mrioc, fwevt->event_id,
fwevt->evt_ctx);
out:
/* Put fwevt reference count to neutralize kref_init increment */
@@ -1361,15 +2018,18 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
u16 perst_id = 0;
perst_id = le16_to_cpu(dev_pg0->persistent_id);
+ if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID)
+ return retval;
+
tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
if (tgtdev) {
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_put(tgtdev);
} else {
tgtdev = mpi3mr_alloc_tgtdev();
if (!tgtdev)
return -ENOMEM;
- mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
+ mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true);
mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
}
@@ -1377,24 +2037,33 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
}
/**
- * mpi3mr_flush_delayed_rmhs_list - Flush pending commands
+ * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
* @mrioc: Adapter instance reference
*
- * Flush pending commands in the delayed removal handshake list
- * due to a controller reset or driver removal as a cleanup.
+ * Flush pending commands in the delayed lists due to a
+ * controller reset or driver removal as a cleanup.
*
* Return: Nothing
*/
-void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc)
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
{
struct delayed_dev_rmhs_node *_rmhs_node;
+ struct delayed_evt_ack_node *_evtack_node;
+ dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
while (!list_empty(&mrioc->delayed_rmhs_list)) {
_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
struct delayed_dev_rmhs_node, list);
list_del(&_rmhs_node->list);
kfree(_rmhs_node);
}
+ dprint_reset(mrioc, "flushing delayed event ack commands\n");
+ while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ list_del(&_evtack_node->list);
+ kfree(_evtack_node);
+ }
}
/**
@@ -1414,6 +2083,9 @@ static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
ioc_info(mrioc,
"%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
__func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
@@ -1454,6 +2126,8 @@ static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
kfree(delayed_dev_rmhs);
return;
}
+
+clear_drv_cmd:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
drv_cmd->callback = NULL;
drv_cmd->retry_count = 0;
@@ -1480,6 +2154,9 @@ static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
int retval;
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
@@ -1508,11 +2185,11 @@ static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
if (retval) {
pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
mrioc->name);
- goto out_failed;
+ goto clear_drv_cmd;
}
return;
-out_failed:
+clear_drv_cmd:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
drv_cmd->callback = NULL;
drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
@@ -1611,6 +2288,145 @@ out_failed:
}
/**
+ * mpi3mr_complete_evt_ack - event ack request completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the completion handler for the non-blocking event
+ * acknowledgment sent to the firmware, and it issues any
+ * pending event acknowledgment request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd->state & MPI3MR_CMD_RESET)
+ goto clear_drv_cmd;
+
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_event_th(mrioc,
+ "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ drv_cmd->ioc_loginfo);
+ }
+
+ if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ delayed_evtack =
+ list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
+ delayed_evtack->event_ctx);
+ list_del(&delayed_evtack->list);
+ kfree(delayed_evtack);
+ return;
+ }
+clear_drv_cmd:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
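+
+/*
+ * Note: when a delayed acknowledgment is dequeued above, the same
+ * drv_cmd slot is handed straight to mpi3mr_send_event_ack() and
+ * the early return keeps it marked in use; the slot is released
+ * only once the delayed list has been drained.
+ */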
+
+/**
+ * mpi3mr_send_event_ack - Issue event acknowledgment request
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event id
+ * @cmdparam: Internal command tracker
+ * @event_ctx: event context
+ *
+ * Issues an event acknowledgment request to the firmware if
+ * there is a free command to send the event ack; otherwise the
+ * ack is added to a pending list so that it is processed on
+ * completion of a prior event acknowledgment.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+ u8 retrycount = 5;
+ u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd) {
+ dprint_event_th(mrioc,
+ "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
+ goto issue_cmd;
+ }
+ dprint_event_th(mrioc,
+ "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
+ MPI3MR_NUM_EVTACKCMD);
+ if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
+ if (!test_and_set_bit(cmd_idx,
+ mrioc->evtack_cmds_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
+ delayed_evtack = kzalloc(sizeof(*delayed_evtack),
+ GFP_ATOMIC);
+ if (!delayed_evtack)
+ return;
+ INIT_LIST_HEAD(&delayed_evtack->list);
+ delayed_evtack->event = event;
+ delayed_evtack->event_ctx = event_ctx;
+ list_add_tail(&delayed_evtack->list,
+ &mrioc->delayed_evtack_cmds_list);
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
+ event, event_ctx);
+ return;
+ }
+ drv_cmd = &mrioc->evtack_cmds[cmd_idx];
+
+issue_cmd:
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ dprint_event_th(mrioc,
+ "sending event ack failed due to command in use\n");
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_complete_evt_ack;
+ evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ dprint_event_th(mrioc,
+		    "posting event ack request failed\n");
+ goto out_failed;
+ }
+
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
+ event, event_ctx);
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
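+
+/*
+ * A minimal sketch of the non-blocking call as made from
+ * mpi3mr_preparereset_evt_th() below; passing a NULL command
+ * tracker makes the function pick a free event-ack slot or queue
+ * the ack on the delayed list:
+ *
+ *	mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
+ *	    le32_to_cpu(event_reply->event_context));
+ */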
+
+/**
* mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1819,6 +2635,40 @@ out:
}
/**
+ * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Blocks and unblocks host level I/O based on the reason code
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_prepare_for_reset *evtdata =
+ (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
+
+ if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
+ dprint_event_th(mrioc,
+ "prepare for reset event top half with rc=start\n");
+ if (mrioc->prepare_for_reset)
+ return;
+ mrioc->prepare_for_reset = 1;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
+ dprint_event_th(mrioc,
+ "prepare for reset top half with rc=abort\n");
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
+ le32_to_cpu(event_reply->event_context));
+}
+
+/**
* mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1848,6 +2698,71 @@ static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Displays cable management event details.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_cable_management *evtdata =
+ (struct mpi3_event_data_cable_management *)event_reply->event_data;
+
+ switch (evtdata->status) {
+ case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
+ {
+ ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
+ "Devices connected to this cable are not detected.\n"
+ "This cable requires %d mW of power.\n",
+ evtdata->receptacle_id,
+ le32_to_cpu(evtdata->active_cable_power_requirement));
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
+ {
+ ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
+ evtdata->receptacle_id);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
+ * @mrioc: Adapter instance reference
+ *
+ * Add a driver-specific event to make sure that the driver does not
+ * process other events until all the devices are refreshed during soft
+ * reset.
+ *
+ * Return: Nothing
+ */
+void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_fwevt *fwevt = NULL;
+
+ fwevt = mpi3mr_alloc_fwevt(0);
+ if (!fwevt) {
+ dprint_event_th(mrioc,
+ "failed to schedule bottom half handler for event(0x%02x)\n",
+ MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
+ return;
+ }
+ fwevt->mrioc = mrioc;
+ fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
+ fwevt->send_ack = 0;
+ fwevt->process_evt = 1;
+ fwevt->evt_ctx = 0;
+ fwevt->event_data_size = 0;
+ mpi3mr_fwevt_add_to_list(mrioc, fwevt);
+}
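+
+/*
+ * The queued event is consumed in mpi3mr_fwevt_bh() above: the
+ * bottom half waits for device_refresh_on to clear and then
+ * rescans for non-responding and newly added devices.
+ */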
+
+/**
* mpi3mr_os_handle_events - Firmware event handler
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1905,7 +2820,16 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
break;
}
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ {
+ mpi3mr_preparereset_evt_th(mrioc, event_reply);
+ ack_req = 0;
+ break;
+ }
case MPI3_EVENT_DEVICE_INFO_CHANGED:
+ case MPI3_EVENT_LOG_DATA:
+ case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI3_EVENT_ENCL_DEVICE_ADDED:
{
process_evt_bh = 1;
break;
@@ -1915,9 +2839,12 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
mpi3mr_energypackchg_evt_th(mrioc, event_reply);
break;
}
- case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
- case MPI3_EVENT_SAS_DISCOVERY:
case MPI3_EVENT_CABLE_MGMT:
+ {
+ mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_SAS_DISCOVERY:
case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
case MPI3_EVENT_PCIE_ENUMERATION:
@@ -2144,6 +3071,11 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
u32 xfer_count = 0, sense_count = 0, resp_data = 0;
u16 dev_handle = 0xFFFF;
struct scsi_sense_hdr sshdr;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+ u8 throttle_enabled_dev = 0;
*reply_dma = 0;
reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
@@ -2200,10 +3132,57 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
goto out;
}
priv = scsi_cmd_priv(scmd);
+
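+	/*
+	 * Throttling bookkeeping: the pending large-I/O size (tracked
+	 * in 512-byte blocks) is decremented on completion, and I/O
+	 * diversion is switched off only once the adapter-wide
+	 * counter, and the throttle group's counter when one is
+	 * present, have fallen to their low watermarks.
+	 */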
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ sdev_priv_data = scmd->device->hostdata;
+ if (sdev_priv_data) {
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (stgt_priv_data) {
+ tg = stgt_priv_data->throttle_group;
+ throttle_enabled_dev =
+ stgt_priv_data->io_throttle_enabled;
+ }
+ }
+ if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) &&
+ throttle_enabled_dev)) {
+ ioc_pend_data_len = atomic_sub_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (tg) {
+ tg_pend_data_len = atomic_sub_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (tg->io_divert && ((ioc_pend_data_len <=
+ mrioc->io_throttle_low) &&
+ (tg_pend_data_len <= tg->low))) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ } else {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+ }
+ } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) {
+ ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz);
+ if (!tg) {
+ if (ioc_pend_data_len <= mrioc->io_throttle_low)
+ stgt_priv_data->io_divert = 0;
+
+ } else if (ioc_pend_data_len <= mrioc->io_throttle_low) {
+ tg_pend_data_len = atomic_read(&tg->pend_large_data_sz);
+ if (tg->io_divert && (tg_pend_data_len <= tg->low)) {
+ tg->io_divert = 0;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(
+ mrioc, tg, 0);
+ }
+ }
+ }
+
if (success_desc) {
scmd->result = DID_OK << 16;
goto out_success;
}
+
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
@@ -2520,49 +3499,63 @@ static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
}
/**
- * mpi3mr_print_response_code - print TM response as a string
- * @mrioc: Adapter instance reference
+ * mpi3mr_tm_response_name - get TM response as a string
* @resp_code: TM response code
*
- * Print TM response code as a readable string.
+ * Convert a known task management response code to a readable
+ * string.
*
- * Return: Nothing.
+ * Return: response code string.
*/
-static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
+static const char *mpi3mr_tm_response_name(u8 resp_code)
{
char *desc;
switch (resp_code) {
- case MPI3MR_RSP_TM_COMPLETE:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
desc = "task management request completed";
break;
- case MPI3MR_RSP_INVALID_FRAME:
+ case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
desc = "invalid frame";
break;
- case MPI3MR_RSP_TM_NOT_SUPPORTED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
desc = "task management request not supported";
break;
- case MPI3MR_RSP_TM_FAILED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
desc = "task management request failed";
break;
- case MPI3MR_RSP_TM_SUCCEEDED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
desc = "task management request succeeded";
break;
- case MPI3MR_RSP_TM_INVALID_LUN:
- desc = "invalid lun";
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
+ desc = "invalid LUN";
break;
- case MPI3MR_RSP_TM_OVERLAPPED_TAG:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
desc = "overlapped tag attempted";
break;
- case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
desc = "task queued, however not sent to target";
break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
+ desc = "task management request denied by NVMe device";
+ break;
default:
desc = "unknown";
break;
}
- ioc_info(mrioc, "%s :response_code(0x%01x): %s\n", __func__,
- resp_code, desc);
+
+ return desc;
+}
+
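+/**
+ * mpi3mr_poll_pend_io_completions - poll reply queues for I/Os
+ * @mrioc: Adapter instance reference
+ *
+ * Processes each operational reply queue once to reap any
+ * pending I/O completions.
+ *
+ * Return: Nothing.
+ */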
+inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ int num_of_reply_queues =
+ mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
+
+ for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
+ mpi3mr_process_op_reply_q(mrioc,
+ mrioc->intr_info[i].op_reply_q);
}
/**
@@ -2572,9 +3565,10 @@ static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
* @handle: Device handle
* @lun: lun ID
* @htag: Host tag of the TM request
+ * @timeout: TM timeout value
* @drv_cmd: Internal command tracker
* @resp_code: Response code place holder
- * @cmd_priv: SCSI command private data
+ * @scmd: SCSI command
*
* Issues a Task Management Request to the controller for a
* specified target, lun and command and wait for its completion
@@ -2583,17 +3577,19 @@ static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
*
* Return: 0 on success, non-zero on errors
*/
-static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
u16 handle, uint lun, u16 htag, ulong timeout,
struct mpi3mr_drv_cmd *drv_cmd,
- u8 *resp_code, struct scmd_priv *cmd_priv)
+ u8 *resp_code, struct scsi_cmnd *scmd)
{
struct mpi3_scsi_task_mgmt_request tm_req;
struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
int retval = 0;
struct mpi3mr_tgt_dev *tgtdev = NULL;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
- struct op_req_qinfo *op_req_q = NULL;
+ struct scmd_priv *cmd_priv = NULL;
+ struct scsi_device *sdev = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
__func__, tm_type, handle);
@@ -2630,16 +3626,21 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
- if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
- scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
- tgtdev->starget->hostdata;
- atomic_inc(&scsi_tgt_priv_data->block_io);
- }
- if (cmd_priv) {
- op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
- tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
- tm_req.task_request_queue_id = cpu_to_le16(op_req_q->qid);
+
+ if (scmd) {
+ sdev = scmd->device;
+ sdev_priv_data = sdev->hostdata;
+ scsi_tgt_priv_data = ((sdev_priv_data) ?
+ sdev_priv_data->tgt_priv_data : NULL);
+ } else {
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
}
+
+ if (scsi_tgt_priv_data)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+
if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
timeout = tgtdev->dev_spec.pcie_inf.abort_to;
@@ -2656,39 +3657,49 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "%s :Issue TM: command timed out\n", __func__);
drv_cmd->is_waiting = 0;
retval = -1;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
+ dprint_tm(mrioc,
+ "task management request timed out after %ld seconds\n",
+ timeout);
+ if (mrioc->logging_level & MPI3_DEBUG_TM)
+ dprint_dump_req(&tm_req, sizeof(tm_req)/4);
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ }
goto out_unlock;
}
- if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
- tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
-
- if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
- ioc_err(mrioc,
- "%s :Issue TM: handle(0x%04x) Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
- __func__, handle, drv_cmd->ioc_status,
- drv_cmd->ioc_loginfo);
+ if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
+ dprint_tm(mrioc, "invalid task management reply message\n");
retval = -1;
goto out_unlock;
}
- if (!tm_reply) {
- ioc_err(mrioc, "%s :Issue TM: No TM Reply message\n", __func__);
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ switch (drv_cmd->ioc_status) {
+ case MPI3_IOCSTATUS_SUCCESS:
+ *resp_code = le32_to_cpu(tm_reply->response_data) &
+ MPI3MR_RI_MASK_RESPCODE;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
+ break;
+ default:
+ dprint_tm(mrioc,
+ "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
retval = -1;
goto out_unlock;
}
- *resp_code = le32_to_cpu(tm_reply->response_data) &
- MPI3MR_RI_MASK_RESPCODE;
switch (*resp_code) {
- case MPI3MR_RSP_TM_SUCCEEDED:
- case MPI3MR_RSP_TM_COMPLETE:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
break;
- case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
retval = -1;
break;
@@ -2697,14 +3708,37 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
break;
}
- ioc_info(mrioc,
- "%s :Issue TM: Completed TM type (0x%x) handle(0x%04x) ",
- __func__, tm_type, handle);
- ioc_info(mrioc,
- "with ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
- drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
- le32_to_cpu(tm_reply->termination_count));
- mpi3mr_print_response_code(mrioc, *resp_code);
+ dprint_tm(mrioc,
+ "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
+ tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count),
+ mpi3mr_tm_response_name(*resp_code), *resp_code);
+
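+	/*
+	 * On a successful TM, disabling interrupts also synchronizes
+	 * the IRQ handlers; the operational reply queues are then
+	 * polled so that completions of commands terminated by the TM
+	 * are reaped before the pending counts are taken below.
+	 */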
+ if (!retval) {
+ mpi3mr_ioc_disable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_ioc_enable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ }
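+
+	/*
+	 * Recount the commands still owned by the LLD for the
+	 * affected target or LUN; the error-handling callers fail
+	 * the reset if any command is left pending.
+	 */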
+ switch (tm_type) {
+ case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!scsi_tgt_priv_data)
+ break;
+ scsi_tgt_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_tgt_pending,
+ (void *)scsi_tgt_priv_data->starget);
+ break;
+ case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!sdev_priv_data)
+ break;
+ sdev_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_dev_pending, (void *)sdev);
+ break;
+ default:
+ break;
+ }
out_unlock:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
@@ -2713,14 +3747,6 @@ out_unlock:
atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
- if (!retval) {
- /*
- * Flush all IRQ handlers by calling synchronize_irq().
- * mpi3mr_ioc_disable_intr() takes care of it.
- */
- mpi3mr_ioc_disable_intr(mrioc);
- mpi3mr_ioc_enable_intr(mrioc);
- }
out:
return retval;
}
@@ -2769,17 +3795,46 @@ static int mpi3mr_bios_param(struct scsi_device *sdev,
* mpi3mr_map_queues - Map queues callback handler
* @shost: SCSI host reference
*
- * Call the blk_mq_pci_map_queues with from which operational
- * queue the mapping has to be done
+ * Maps default and poll queues.
*
- * Return: return of blk_mq_pci_map_queues
+ * Return: Nothing.
*/
-static int mpi3mr_map_queues(struct Scsi_Host *shost)
+static void mpi3mr_map_queues(struct Scsi_Host *shost)
{
struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ int i, qoff, offset;
+ struct blk_mq_queue_map *map = NULL;
+
+ offset = mrioc->op_reply_q_offset;
+
+ for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
+ map = &shost->tag_set.map[i];
+
+ map->nr_queues = 0;
+
+ if (i == HCTX_TYPE_DEFAULT)
+ map->nr_queues = mrioc->default_qcount;
+ else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = mrioc->active_poll_qcount;
+
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /*
+		 * The poll queues don't have an IRQ (and hence no IRQ
+		 * affinity), so use the regular blk-mq CPU mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ else
+ blk_mq_map_queues(map);
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- mrioc->pdev, mrioc->op_reply_q_offset);
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+ }
}
/**
@@ -2938,6 +3993,13 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
stgt_priv_data = sdev_priv_data->tgt_priv_data;
dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
sdev_printk(KERN_INFO, scmd->device,
"Target Reset is issued to handle(0x%04x)\n",
dev_handle);
@@ -2945,15 +4007,22 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
ret = mpi3mr_issue_tm(mrioc,
MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
- MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
if (ret)
goto out;
+ if (stgt_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: target has %d pending commands, target reset is failed\n",
+		    "%s: target has %d pending commands, target reset failed\n",
+ goto out;
+ }
+
retval = SUCCESS;
out:
sdev_printk(KERN_INFO, scmd->device,
- "Target reset is %s for scmd(%p)\n",
+ "%s: target reset is %s for scmd(%p)\n", mrioc->name,
((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
return retval;
@@ -2992,21 +4061,34 @@ static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
stgt_priv_data = sdev_priv_data->tgt_priv_data;
dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
sdev_printk(KERN_INFO, scmd->device,
"Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
ret = mpi3mr_issue_tm(mrioc,
MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
- MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
if (ret)
goto out;
+ if (sdev_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+		    "%s: device has %d pending commands, device(LUN) reset failed\n",
+ mrioc->name, sdev_priv_data->pend_count);
+ goto out;
+ }
retval = SUCCESS;
out:
sdev_printk(KERN_INFO, scmd->device,
- "Device(lun) reset is %s for scmd(%p)\n",
+ "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
return retval;
@@ -3049,32 +4131,43 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost,
{
struct mpi3mr_ioc *mrioc = shost_priv(shost);
u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+ u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- if (time >= (pe_timeout * HZ)) {
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ ioc_err(mrioc, "port enable failed due to fault or reset\n");
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
mrioc->init_cmds.is_waiting = 0;
mrioc->init_cmds.callback = NULL;
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
- ioc_err(mrioc, "%s :port enable request timed out\n", __func__);
- mrioc->is_driver_loading = 0;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_PE_TIMEOUT, 1);
}
- if (mrioc->scan_failed) {
- ioc_err(mrioc,
- "%s :port enable failed with (ioc_status=0x%08x)\n",
- __func__, mrioc->scan_failed);
- mrioc->is_driver_loading = 0;
- mrioc->stop_drv_processing = 1;
- return 1;
+ if (time >= (pe_timeout * HZ)) {
+		ioc_err(mrioc, "port enable failed due to timeout\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
}
if (mrioc->scan_started)
return 0;
- ioc_info(mrioc, "%s :port enable: SUCCESS\n", __func__);
+
+ if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+	} else {
+		ioc_info(mrioc, "port enable is successfully completed\n");
+	}
+
mpi3mr_start_watchdog(mrioc);
mrioc->is_driver_loading = 0;
-
+ mrioc->stop_bsgs = 0;
return 1;
}
@@ -3091,9 +4184,10 @@ static void mpi3mr_slave_destroy(struct scsi_device *sdev)
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
unsigned long flags;
struct scsi_target *starget;
+ struct sas_rphy *rphy = NULL;
if (!sdev->hostdata)
return;
@@ -3106,7 +4200,14 @@ static void mpi3mr_slave_destroy(struct scsi_device *sdev)
scsi_tgt_priv_data->num_luns--;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
+
if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
tgt_dev->starget = NULL;
if (tgt_dev)
@@ -3171,28 +4272,47 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
struct scsi_target *starget;
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
unsigned long flags;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
starget = scsi_target(sdev);
shost = dev_to_shost(&starget->dev);
mrioc = shost_priv(shost);
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
if (!tgt_dev)
return -ENXIO;
mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
+
+ sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
+ blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
+
switch (tgt_dev->dev_type) {
case MPI3_DEVICE_DEVFORM_PCIE:
/* The block layer hw sector size = 512 */
- blk_queue_max_hw_sectors(sdev->request_queue,
- tgt_dev->dev_spec.pcie_inf.mdts / 512);
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ if ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgt_dev->dev_spec.pcie_inf.mdts / 512);
+ if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ }
break;
default:
break;
@@ -3216,11 +4336,12 @@ static int mpi3mr_slave_alloc(struct scsi_device *sdev)
struct Scsi_Host *shost;
struct mpi3mr_ioc *mrioc;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
- struct mpi3mr_tgt_dev *tgt_dev;
+ struct mpi3mr_tgt_dev *tgt_dev = NULL;
struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
unsigned long flags;
struct scsi_target *starget;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
starget = scsi_target(sdev);
shost = dev_to_shost(&starget->dev);
@@ -3228,7 +4349,14 @@ static int mpi3mr_slave_alloc(struct scsi_device *sdev)
scsi_tgt_priv_data = starget->hostdata;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+
+ if (starget->channel == mrioc->scsi_device_channel)
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ }
if (tgt_dev) {
if (tgt_dev->starget == NULL)
@@ -3271,6 +4399,8 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
struct mpi3mr_tgt_dev *tgt_dev;
unsigned long flags;
int retval = 0;
+ struct sas_rphy *rphy = NULL;
+ bool update_stgt_priv_data = false;
scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
if (!scsi_tgt_priv_data)
@@ -3279,8 +4409,25 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
starget->hostdata = scsi_tgt_priv_data;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
- tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
- if (tgt_dev && !tgt_dev->is_hidden) {
+
+ if (starget->channel == mrioc->scsi_device_channel) {
+ tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
+ if (tgt_dev && !tgt_dev->is_hidden)
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ } else if (mrioc->sas_transport_enabled && !starget->channel) {
+ rphy = dev_to_rphy(starget->dev.parent);
+ tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
+ (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
+ update_stgt_priv_data = true;
+ else
+ retval = -ENXIO;
+ }
+
+ if (update_stgt_priv_data) {
scsi_tgt_priv_data->starget = starget;
scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
@@ -3289,8 +4436,12 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
tgt_dev->starget = starget;
atomic_set(&scsi_tgt_priv_data->block_io, 0);
retval = 0;
- } else
- retval = -ENXIO;
+ scsi_tgt_priv_data->io_throttle_enabled =
+ tgt_dev->io_throttle_enabled;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group =
+ tgt_dev->dev_spec.vd_inf.tg;
+ }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
return retval;
@@ -3312,9 +4463,22 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
struct scsi_cmnd *scmd)
{
unsigned char *buf;
- u16 param_len, desc_len;
-
- param_len = get_unaligned_be16(scmd->cmnd + 7);
+ u16 param_len, desc_len, trunc_param_len;
+
+ trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
+
+ if (mrioc->pdev->revision) {
+ if ((param_len > 24) && ((param_len - 8) & 0xF)) {
+ trunc_param_len -= (param_len - 8) & 0xF;
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ dprint_scsi_err(mrioc,
+ "truncating param_len from (%d) to (%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ }
+ return false;
+ }
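+
+ /*
+ * Illustrative arithmetic for the truncation above (hypothetical
+ * numbers, not from a real trace): a param_len of 42 is an 8-byte
+ * header plus 34 descriptor bytes; since UNMAP block descriptors
+ * are 16 bytes each, (42 - 8) & 0xF = 2 stray bytes are dropped
+ * and the CDB is rewritten with a param_len of 40 before the
+ * command is sent on.
+ */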
if (!param_len) {
ioc_warn(mrioc,
@@ -3374,12 +4538,12 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
}
if (param_len > (desc_len + 8)) {
+ trunc_param_len = desc_len + 8;
scsi_print_command(scmd);
- ioc_warn(mrioc,
- "%s: Truncating param_len(%d) to desc_len+8(%d)\n",
- __func__, param_len, (desc_len + 8));
- param_len = desc_len + 8;
- put_unaligned_be16(param_len, scmd->cmnd + 7);
+ dprint_scsi_err(mrioc,
+ "truncating param_len(%d) to desc_len+8(%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
scsi_print_command(scmd);
}
@@ -3431,9 +4595,19 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
int retval = 0;
u16 dev_handle;
u16 host_tag;
- u32 scsiio_flags = 0;
+ u32 scsiio_flags = 0, data_len_blks = 0;
struct request *rq = scsi_cmd_to_rq(scmd);
int iprio_class;
+ u8 is_pcie_dev = 0;
+ u32 tracked_io_sz = 0;
+ u32 ioc_pend_data_len = 0, tg_pend_data_len = 0;
+ struct mpi3mr_throttle_group_info *tg = NULL;
+
+ if (mrioc->unrecoverable) {
+ scmd->result = DID_ERROR << 16;
+ scsi_done(scmd);
+ goto out;
+ }
sdev_priv_data = scmd->device->hostdata;
if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
@@ -3456,6 +4630,16 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ if (atomic_read(&stgt_priv_data->block_io)) {
+ if (mrioc->stop_drv_processing) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_done(scmd);
+ goto out;
+ }
+ retval = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto out;
+ }
+
dev_handle = stgt_priv_data->dev_handle;
if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
@@ -3468,18 +4652,10 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
- if (atomic_read(&stgt_priv_data->block_io)) {
- if (mrioc->stop_drv_processing) {
- scmd->result = DID_NO_CONNECT << 16;
- scsi_done(scmd);
- goto out;
- }
- retval = SCSI_MLQUEUE_DEVICE_BUSY;
- goto out;
- }
-
- if ((scmd->cmnd[0] == UNMAP) &&
- (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
+ if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
+ is_pcie_dev = 1;
+ if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
+ (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
mpi3mr_check_return_unmap(mrioc, scmd))
goto out;
@@ -3529,11 +4705,50 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];
+ data_len_blks = scsi_bufflen(scmd) >> 9;
+ if ((data_len_blks >= mrioc->io_throttle_data_length) &&
+ stgt_priv_data->io_throttle_enabled) {
+ tracked_io_sz = data_len_blks;
+ tg = stgt_priv_data->throttle_group;
+ if (tg) {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ tg_pend_data_len = atomic_add_return(data_len_blks,
+ &tg->pend_large_data_sz);
+ if (!tg->io_divert && ((ioc_pend_data_len >=
+ mrioc->io_throttle_high) ||
+ (tg_pend_data_len >= tg->high))) {
+ tg->io_divert = 1;
+ tg->need_qd_reduction = 1;
+ mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc,
+ tg, 1);
+ mpi3mr_queue_qd_reduction_event(mrioc, tg);
+ }
+ } else {
+ ioc_pend_data_len = atomic_add_return(data_len_blks,
+ &mrioc->pend_large_data_sz);
+ if (ioc_pend_data_len >= mrioc->io_throttle_high)
+ stgt_priv_data->io_divert = 1;
+ }
+ }
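+
+ /*
+ * Worked sketch of the accounting above (threshold values are
+ * made up for illustration): with io_throttle_data_length at 2048
+ * blocks, a 1 MiB write (scsi_bufflen(scmd) >> 9 == 2048) is
+ * tracked in pend_large_data_sz; once the IOC-wide or throttle
+ * group total crosses its high-water mark, subsequent large I/Os
+ * are marked for divert, which the block below translates into
+ * MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE.
+ */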
+
+ if (stgt_priv_data->io_divert) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING;
+ }
+ scsiio_req->flags = cpu_to_le32(scsiio_flags);
if (mpi3mr_op_request_post(mrioc, op_req_q,
scmd_priv_data->mpi3mr_scsiio_req)) {
mpi3mr_clear_scmd_priv(mrioc, scmd);
retval = SCSI_MLQUEUE_HOST_BUSY;
+ if (tracked_io_sz) {
+ atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz);
+ if (tg)
+ atomic_sub(tracked_io_sz,
+ &tg->pend_large_data_sz);
+ }
goto out;
}
@@ -3559,6 +4774,7 @@ static struct scsi_host_template mpi3mr_driver_template = {
.eh_host_reset_handler = mpi3mr_eh_host_reset,
.bios_param = mpi3mr_bios_param,
.map_queues = mpi3mr_map_queues,
+ .mq_poll = mpi3mr_blk_mq_poll,
.no_write_same = 1,
.can_queue = 1,
.this_id = -1,
@@ -3567,8 +4783,11 @@ static struct scsi_host_template mpi3mr_driver_template = {
*/
.max_sectors = 2048,
.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
+ .max_segment_size = 0xffffffff,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scmd_priv),
+ .shost_groups = mpi3mr_host_groups,
+ .sdev_groups = mpi3mr_dev_groups,
};
/**
@@ -3710,14 +4929,23 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&mrioc->tgtdev_lock);
spin_lock_init(&mrioc->watchdog_lock);
spin_lock_init(&mrioc->chain_buf_lock);
+ spin_lock_init(&mrioc->sas_node_lock);
INIT_LIST_HEAD(&mrioc->fwevt_list);
INIT_LIST_HEAD(&mrioc->tgtdev_list);
INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
+ INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
+ INIT_LIST_HEAD(&mrioc->sas_expander_list);
+ INIT_LIST_HEAD(&mrioc->hba_port_table_list);
+ INIT_LIST_HEAD(&mrioc->enclosure_list);
mutex_init(&mrioc->reset_mutex);
mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
+ mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
+ mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
+ MPI3MR_HOSTTAG_TRANSPORT_CMDS);
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
@@ -3730,6 +4958,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mrioc->logging_level = logging_level;
mrioc->shost = shost;
mrioc->pdev = pdev;
+ mrioc->stop_bsgs = 1;
/* init shost parameters */
shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
@@ -3739,6 +4968,8 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_channel = 0;
shost->max_id = 0xFFFFFFFF;
+ shost->host_tagset = 1;
+
if (prot_mask >= 0)
scsi_host_set_prot(shost, prot_mask);
else {
@@ -3767,26 +4998,34 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
"%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
- mrioc->fwevt_worker_name, WQ_MEM_RECLAIM);
+ mrioc->fwevt_worker_name, 0);
if (!mrioc->fwevt_worker_thread) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
retval = -ENODEV;
- goto out_fwevtthread_failed;
+ goto fwevtthread_failed;
}
mrioc->is_driver_loading = 1;
- if (mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)) {
- ioc_err(mrioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
+ mrioc->cpu_count = num_online_cpus();
+ if (mpi3mr_setup_resources(mrioc)) {
+ ioc_err(mrioc, "setup resources failed\n");
retval = -ENODEV;
- goto out_iocinit_failed;
+ goto resource_alloc_failed;
+ }
+ if (mpi3mr_init_ioc(mrioc)) {
+ ioc_err(mrioc, "initializing IOC failed\n");
+ retval = -ENODEV;
+ goto init_ioc_failed;
}
shost->nr_hw_queues = mrioc->num_op_reply_q;
+ if (mrioc->active_poll_qcount)
+ shost->nr_maps = 3;
+
shost->can_queue = mrioc->max_host_ios;
shost->sg_tablesize = MPI3MR_SG_DEPTH;
- shost->max_id = mrioc->facts.max_perids;
+ shost->max_id = mrioc->facts.max_perids + 1;
retval = scsi_add_host(shost, &pdev->dev);
if (retval) {
@@ -3796,13 +5035,18 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
scsi_scan_host(shost);
+ mpi3mr_bsg_init(mrioc);
return retval;
addhost_failed:
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
-out_iocinit_failed:
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+init_ioc_failed:
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+resource_alloc_failed:
destroy_workqueue(mrioc->fwevt_worker_thread);
-out_fwevtthread_failed:
+fwevtthread_failed:
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
@@ -3815,6 +5059,7 @@ shost_failed:
* mpi3mr_remove - PCI remove callback
* @pdev: PCI device instance
*
+ * Clean up the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, and unregister the shost.
*
@@ -3835,6 +5080,12 @@ static void mpi3mr_remove(struct pci_dev *pdev)
while (mrioc->reset_in_progress || mrioc->is_driver_loading)
ssleep(1);
+ if (!pci_device_is_present(mrioc->pdev)) {
+ mrioc->unrecoverable = 1;
+ mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
+ }
+
+ mpi3mr_bsg_exit(mrioc);
mrioc->stop_drv_processing = 1;
mpi3mr_cleanup_fwevt_list(mrioc);
spin_lock_irqsave(&mrioc->fwevt_lock, flags);
@@ -3843,7 +5094,11 @@ static void mpi3mr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
if (wq)
destroy_workqueue(wq);
- scsi_remove_host(shost);
+
+ if (mrioc->sas_transport_enabled)
+ sas_remove_host(shost);
+ else
+ scsi_remove_host(shost);
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
@@ -3851,7 +5106,10 @@ static void mpi3mr_remove(struct pci_dev *pdev)
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
mpi3mr_tgtdev_put(tgtdev);
}
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
@@ -3891,25 +5149,27 @@ static void mpi3mr_shutdown(struct pci_dev *pdev)
spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
if (wq)
destroy_workqueue(wq);
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
}
-#ifdef CONFIG_PM
/**
* mpi3mr_suspend - PCI power management suspend callback
- * @pdev: PCI device instance
- * @state: New power state
+ * @dev: Device struct
*
 * Change the power state to the given value and clean up the IOC
 * by issuing MUR and shutdown notification.
*
* Return: 0 always.
*/
-static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+mpi3mr_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct mpi3mr_ioc *mrioc;
- pci_power_t device_state;
if (!shost)
return 0;
@@ -3921,13 +5181,10 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
mpi3mr_cleanup_fwevt_list(mrioc);
scsi_block_requests(shost);
mpi3mr_stop_watchdog(mrioc);
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_SUSPEND);
+ mpi3mr_cleanup_ioc(mrioc);
- device_state = pci_choose_state(pdev, state);
- ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- pdev, pci_name(pdev), device_state);
- pci_save_state(pdev);
- pci_set_power_state(pdev, device_state);
+ ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n",
+ pdev, pci_name(pdev));
mpi3mr_cleanup_resources(mrioc);
return 0;
@@ -3935,15 +5192,17 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
/**
* mpi3mr_resume - PCI power management resume callback
- * @pdev: PCI device instance
+ * @dev: Device struct
*
* Restore the power state to D0 and reinitialize the controller
* and resume I/O operations to the target devices
*
* Return: 0 on success, non-zero on failure
*/
-static int mpi3mr_resume(struct pci_dev *pdev)
+static int __maybe_unused
+mpi3mr_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct mpi3mr_ioc *mrioc;
pci_power_t device_state = pdev->current_state;
@@ -3956,9 +5215,6 @@ static int mpi3mr_resume(struct pci_dev *pdev)
ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
mrioc->pdev = pdev;
mrioc->cpu_count = num_online_cpus();
r = mpi3mr_setup_resources(mrioc);
@@ -3969,36 +5225,48 @@ static int mpi3mr_resume(struct pci_dev *pdev)
}
mrioc->stop_drv_processing = 0;
+ mpi3mr_invalidate_devhandles(mrioc);
+ mpi3mr_free_enclosure_list(mrioc);
mpi3mr_memset_buffers(mrioc);
- mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESUME);
+ r = mpi3mr_reinit_ioc(mrioc, 1);
+ if (r) {
+ ioc_err(mrioc, "resuming controller failed[%d]\n", r);
+ return r;
+ }
+ ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME);
scsi_unblock_requests(shost);
+ mrioc->device_refresh_on = 0;
mpi3mr_start_watchdog(mrioc);
return 0;
}
-#endif
static const struct pci_device_id mpi3mr_pci_id_table[] = {
{
- PCI_DEVICE_SUB(PCI_VENDOR_ID_LSI_LOGIC, 0x00A5,
- PCI_ANY_ID, PCI_ANY_ID)
+ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
+ MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
+static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume);
+
static struct pci_driver mpi3mr_pci_driver = {
.name = MPI3MR_DRIVER_NAME,
.id_table = mpi3mr_pci_id_table,
.probe = mpi3mr_probe,
.remove = mpi3mr_remove,
.shutdown = mpi3mr_shutdown,
-#ifdef CONFIG_PM
- .suspend = mpi3mr_suspend,
- .resume = mpi3mr_resume,
-#endif
+ .driver.pm = &mpi3mr_pm_ops,
};
+static ssize_t event_counter_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
+}
+static DRIVER_ATTR_RO(event_counter);
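+
+/*
+ * The read-only driver attribute above exports the global event
+ * counter; once the PCI driver is registered in mpi3mr_init() it is
+ * expected (assuming the usual driver-core sysfs layout) to be
+ * readable as:
+ *
+ *	cat /sys/bus/pci/drivers/mpi3mr/event_counter
+ */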
+
static int __init mpi3mr_init(void)
{
int ret_val;
@@ -4006,8 +5274,33 @@ static int __init mpi3mr_init(void)
pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
MPI3MR_DRIVER_VERSION);
+ mpi3mr_transport_template =
+ sas_attach_transport(&mpi3mr_transport_functions);
+ if (!mpi3mr_transport_template) {
+ pr_err("%s failed to load due to sas transport attach failure\n",
+ MPI3MR_DRIVER_NAME);
+ return -ENODEV;
+ }
+
ret_val = pci_register_driver(&mpi3mr_pci_driver);
+ if (ret_val) {
+ pr_err("%s failed to load due to pci register driver failure\n",
+ MPI3MR_DRIVER_NAME);
+ goto err_pci_reg_fail;
+ }
+
+ ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
+ &driver_attr_event_counter);
+ if (ret_val)
+ goto err_event_counter;
+
+ return ret_val;
+
+err_event_counter:
+ pci_unregister_driver(&mpi3mr_pci_driver);
+err_pci_reg_fail:
+ sas_release_transport(mpi3mr_transport_template);
return ret_val;
}
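+
+/*
+ * Teardown note: the error labels above unwind in reverse
+ * registration order (driver attribute, then PCI driver, then SAS
+ * transport), mirroring what mpi3mr_exit() does on module unload.
+ */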
@@ -4021,7 +5314,10 @@ static void __exit mpi3mr_exit(void)
pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
MPI3MR_DRIVER_VERSION);
+ driver_remove_file(&mpi3mr_pci_driver.driver,
+ &driver_attr_event_counter);
pci_unregister_driver(&mpi3mr_pci_driver);
+ sas_release_transport(mpi3mr_transport_template);
}
module_init(mpi3mr_init);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
new file mode 100644
index 000000000000..3fc897336b5e
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -0,0 +1,3291 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for Broadcom MPI3 Storage Controllers
+ *
+ * Copyright (C) 2017-2022 Broadcom Inc.
+ * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
+ *
+ */
+
+#include "mpi3mr.h"
+
+static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander);
+
+/**
+ * mpi3mr_post_transport_req - Issue transport requests and wait
+ * @mrioc: Adapter instance reference
+ * @request: Properly populated MPI3 request
+ * @request_sz: Size of the MPI3 request
+ * @reply: Pointer to return MPI3 reply
+ * @reply_sz: Size of the MPI3 reply buffer
+ * @timeout: Timeout in seconds
+ * @ioc_status: Pointer to return ioc status
+ *
+ * A generic function for posting MPI3 requests from the SAS
+ * transport layer that uses transport command infrastructure.
+ * This blocks for the completion of the request for timeout
+ * seconds, and if the request times out, this function faults
+ * the controller with the proper reason code.
+ *
+ * On successful completion of the request this function returns
+ * appropriate ioc status from the firmware back to the caller.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_post_transport_req(struct mpi3mr_ioc *mrioc, void *request,
+ u16 request_sz, void *reply, u16 reply_sz, int timeout,
+ u16 *ioc_status)
+{
+ int retval = 0;
+
+ mutex_lock(&mrioc->transport_cmds.mutex);
+ if (mrioc->transport_cmds.state & MPI3MR_CMD_PENDING) {
+ retval = -1;
+ ioc_err(mrioc, "sending transport request failed due to command in use\n");
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+ goto out;
+ }
+ mrioc->transport_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->transport_cmds.is_waiting = 1;
+ mrioc->transport_cmds.callback = NULL;
+ mrioc->transport_cmds.ioc_status = 0;
+ mrioc->transport_cmds.ioc_loginfo = 0;
+
+ init_completion(&mrioc->transport_cmds.done);
+ dprint_cfg_info(mrioc, "posting transport request\n");
+ if (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO)
+ dprint_dump(request, request_sz, "transport_req");
+ retval = mpi3mr_admin_request_post(mrioc, request, request_sz, 1);
+ if (retval) {
+ ioc_err(mrioc, "posting transport request failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->transport_cmds.done,
+ (timeout * HZ));
+ if (!(mrioc->transport_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_SAS_TRANSPORT_TIMEOUT);
+ ioc_err(mrioc, "transport request timed out\n");
+ retval = -1;
+ goto out_unlock;
+ }
+ *ioc_status = mrioc->transport_cmds.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK;
+ if ((*ioc_status) != MPI3_IOCSTATUS_SUCCESS)
+ dprint_transport_err(mrioc,
+ "transport request returned with ioc_status(0x%04x), log_info(0x%08x)\n",
+ *ioc_status, mrioc->transport_cmds.ioc_loginfo);
+
+ if ((reply) && (mrioc->transport_cmds.state & MPI3MR_CMD_REPLY_VALID))
+ memcpy((u8 *)reply, mrioc->transport_cmds.reply, reply_sz);
+
+out_unlock:
+ mrioc->transport_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->transport_cmds.mutex);
+
+out:
+ return retval;
+}
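+
+/*
+ * Minimal caller sketch (see mpi3mr_report_manufacture() below for a
+ * real user): build an MPI3 request tagged with
+ * MPI3MR_HOSTTAG_TRANSPORT_CMDS, post it, and treat a non-zero return
+ * or a non-success ioc_status as failure; the "out" label here is
+ * hypothetical:
+ *
+ *	u16 ioc_status;
+ *
+ *	if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ *	    &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status) ||
+ *	    (ioc_status != MPI3_IOCSTATUS_SUCCESS))
+ *		goto out;
+ */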
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * mpi3mr_report_manufacture - obtain SMP report_manufacture
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the expander device
+ * @edev: SAS transport layer sas_expander_device object
+ * @port_id: ID of the HBA port
+ *
+ * Fills in the sas_expander_device with manufacturing info.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_report_manufacture(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct sas_expander_device *edev, u8 port_id)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc = 0;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u8 *tmp;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev,
+ data_out_sz + data_in_sz, &data_out_dma, GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ manufacture_reply = data_out + data_out_sz;
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) port_id;
+ mpi_request.sas_address = cpu_to_le64(sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending report manufacturer SMP request to sas_address(0x%016llx), port(%d)\n",
+ (unsigned long long)sas_address, port_id);
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "report manufacturer SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "report manufacturer - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct rep_manu_reply)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ strscpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strscpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strscpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strscpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ return rc;
+}
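+
+/*
+ * Buffer layout note for the function above: the SMP request and its
+ * reply share a single coherent allocation,
+ *
+ *	data_out_dma                data_in_dma = data_out_dma + data_out_sz
+ *	|<-- rep_manu_request -->|<-- rep_manu_reply -->|
+ *
+ * so one dma_free_coherent() call releases both halves.
+ */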
+
+/**
+ * __mpi3mr_expander_find_by_handle - expander search by handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the expander
+ *
+ * Context: The caller should acquire sas_node_lock
+ *
+ * This searches for expander device based on handle, then
+ * returns the sas_node object.
+ *
+ * Return: Expander sas_node object reference or NULL
+ */
+struct mpi3mr_sas_node *__mpi3mr_expander_find_by_handle(struct mpi3mr_ioc
+ *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
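+
+/*
+ * Locking sketch for the lookup above (hypothetical caller; "handle"
+ * is assumed to be a valid firmware device handle): the expander list
+ * walk is only safe under sas_node_lock:
+ *
+ *	unsigned long flags;
+ *	struct mpi3mr_sas_node *sas_expander;
+ *
+ *	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ *	sas_expander = __mpi3mr_expander_find_by_handle(mrioc, handle);
+ *	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ */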
+
+/**
+ * mpi3mr_is_expander_device - check whether device is an expander
+ * @device_info: Bitfield providing information about the device
+ *
+ * Return: 1 if the device is expander device, else 0.
+ */
+u8 mpi3mr_is_expander_device(u16 device_info)
+{
+ if ((device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) ==
+ MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * mpi3mr_get_sas_address - retrieve sas_address for handle
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @sas_address: Address to hold sas address
+ *
+ * This function issues a device page0 read for a given device
+ * handle, retrieves the SAS address and returns it back.
+ *
+ * Return: 0 for success, non-zero for failure
+ */
+static int mpi3mr_get_sas_address(struct mpi3mr_ioc *mrioc, u16 handle,
+ u64 *sas_address)
+{
+ struct mpi3_device_page0 dev_pg0;
+ u16 ioc_status;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ *sas_address = 0;
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (le16_to_cpu(dev_pg0.flags) &
+ MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE)
+ *sas_address = mrioc->sas_hba.sas_address;
+ else if (dev_pg0.device_form == MPI3_DEVICE_DEVFORM_SAS_SATA) {
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ *sas_address = le64_to_cpu(sasinf->sas_address);
+ } else {
+ ioc_err(mrioc, "%s: device_form(%d) is not SAS_SATA\n",
+ __func__, dev_pg0.device_form);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device based on sas address and
+ * hba port pointer, then returns the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.hba_port == hba_port))
+ goto found_device;
+ return NULL;
+found_device:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_get_tgtdev_by_addr - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for the target device based on sas address and
+ * hba port pointer, then returns the mpi3mr_tgt_dev object.
+ *
+ * Context: This function acquires tgtdev_lock and releases it
+ * before returning the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_addr(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+
+ if (!hba_port)
+ goto out;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc, sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+out:
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_remove_device_by_sas_address - remove the device
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @hba_port: HBA port entry
+ *
+ * This searches for target device using sas address and hba
+ * port pointer then removes it from the OS.
+ *
+ * Return: None
+ */
+static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ u8 was_on_tgtdev_list = 0;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr(mrioc,
+ sas_address, hba_port);
+ if (tgtdev) {
+ if (!list_empty(&tgtdev->list)) {
+ list_del_init(&tgtdev->list);
+ was_on_tgtdev_list = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ if (was_on_tgtdev_list) {
+ if (tgtdev->host_exposed)
+ mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+}
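+
+/*
+ * Reference counting note for the helper above: the
+ * __mpi3mr_get_tgtdev_by_addr() lookup takes one reference, the
+ * list_del_init() path drops the list's own reference under the lock,
+ * and the final mpi3mr_tgtdev_put() after the device is removed from
+ * the host drops the lookup reference.
+ */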
+
+/**
+ * __mpi3mr_get_tgtdev_by_addr_and_rphy - target device search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of the device
+ * @rphy: SAS transport layer rphy object
+ *
+ * This searches for the target device based on sas address and
+ * rphy pointer, then returns the mpi3mr_tgt_dev object.
+ *
+ * Return: Valid tgt_dev or NULL
+ */
+struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_addr_and_rphy(
+ struct mpi3mr_ioc *mrioc, u64 sas_address, struct sas_rphy *rphy)
+{
+ struct mpi3mr_tgt_dev *tgtdev;
+
+ assert_spin_locked(&mrioc->tgtdev_lock);
+
+ list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
+ if ((tgtdev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA) &&
+ (tgtdev->dev_spec.sas_sata_inf.sas_address == sas_address)
+ && (tgtdev->dev_spec.sas_sata_inf.rphy == rphy))
+ goto found_device;
+ return NULL;
+found_device:
+ mpi3mr_tgtdev_get(tgtdev);
+ return tgtdev;
+}
+
+/**
+ * mpi3mr_expander_find_by_sas_address - sas expander search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander
+ * @hba_port: HBA port entry
+ *
+ * Return: A valid SAS expander node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *mpi3mr_expander_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander, *r = NULL;
+
+ if (!hba_port)
+ goto out;
+
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ if ((sas_expander->sas_address != sas_address) ||
+ (sas_expander->hba_port != hba_port))
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+out:
+ return r;
+}
+
+/**
+ * __mpi3mr_sas_node_find_by_sas_address - sas node search
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of expander or sas host
+ * @hba_port: HBA port entry
+ *
+ * Context: Caller should acquire mrioc->sas_node_lock.
+ *
+ * If the SAS address indicates the device is directly attached to
+ * the controller (i.e. it is the controller's own SAS address), the
+ * SAS node associated with the controller is returned; otherwise the
+ * SAS address and hba port are used to identify the exact expander
+ * and the associated sas_node object is returned. If there is no
+ * match, NULL is returned.
+ *
+ * Return: A valid SAS node or NULL.
+ *
+ */
+static struct mpi3mr_sas_node *__mpi3mr_sas_node_find_by_sas_address(
+ struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ if (mrioc->sas_hba.sas_address == sas_address)
+ return &mrioc->sas_hba;
+ return mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+}
+
+/**
+ * mpi3mr_parent_present - Is parent present for a phy
+ * @mrioc: Adapter instance reference
+ * @phy: SAS transport layer phy object
+ *
+ * Return: 0 if parent is present else non-zero
+ */
+static int mpi3mr_parent_present(struct mpi3mr_ioc *mrioc, struct sas_phy *phy)
+{
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ if (__mpi3mr_sas_node_find_by_sas_address(mrioc,
+ phy->identify.sas_address,
+ hba_port) == NULL) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return 0;
+}
+
+/**
+ * mpi3mr_convert_phy_link_rate - convert phy link rate
+ * @link_rate: link rate as defined in the MPI header
+ *
+ * Convert link_rate from mpi format into sas_transport layer
+ * form.
+ *
+ * Return: A valid SAS transport layer defined link rate
+ */
+static enum sas_linkrate mpi3mr_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI3_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_22_5:
+ rc = SAS_LINK_RATE_22_5_GBPS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+ case MPI3_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI3_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ default:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * mpi3mr_delete_sas_phy - Remove a single phy from port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mr_sas_phy->phy_id);
+
+ list_del(&mr_sas_phy->port_siblings);
+ mr_sas_port->num_phys--;
+ mr_sas_port->phy_mask &= ~(1 << mr_sas_phy->phy_id);
+ if (mr_sas_port->lowest_phy == mr_sas_phy->phy_id)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_delete_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * mpi3mr_add_sas_phy - Add a single phy to a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_sas_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port,
+ struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+
+ dev_info(&mr_sas_phy->phy->dev,
+ "add: sas_address(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mr_sas_phy->phy_id);
+
+ list_add_tail(&mr_sas_phy->port_siblings, &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << mr_sas_phy->phy_id);
+ if (mr_sas_phy->phy_id < mr_sas_port->lowest_phy)
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+ sas_port_add_phy(mr_sas_port->port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * mpi3mr_add_phy_to_an_existing_port - add phy to existing port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ * @sas_address: SAS address of device/expander where the phy needs
+ * to be added to
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+static void mpi3mr_add_phy_to_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy,
+ u64 sas_address, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_port *mr_sas_port;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ if (!hba_port)
+ return;
+
+ list_for_each_entry(mr_sas_port, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy == mr_sas_phy)
+ return;
+ }
+ mpi3mr_add_sas_phy(mrioc, mr_sas_port, mr_sas_phy);
+ return;
+ }
+}
+
+/**
+ * mpi3mr_delete_sas_port - helper function to remove a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_port: Internal Port object
+ *
+ * Return: None.
+ */
+static void mpi3mr_delete_sas_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ u64 sas_address = mr_sas_port->remote_identify.sas_address;
+ struct mpi3mr_hba_port *hba_port = mr_sas_port->hba_port;
+ enum sas_device_type device_type =
+ mr_sas_port->remote_identify.device_type;
+
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ if (device_type == SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc, sas_address,
+ hba_port);
+
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc, sas_address, hba_port);
+}
+
+/**
+ * mpi3mr_del_phy_from_an_existing_port - del phy from a port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @mr_sas_phy: Internal Phy object
+ *
+ * Return: None.
+ */
+static void mpi3mr_del_phy_from_an_existing_port(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, struct mpi3mr_sas_phy *mr_sas_phy)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_phy *srch_phy;
+
+ if (mr_sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(srch_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if (srch_phy != mr_sas_phy)
+ continue;
+ if ((mr_sas_port->num_phys == 1) &&
+ !mrioc->reset_in_progress)
+ mpi3mr_delete_sas_port(mrioc, mr_sas_port);
+ else
+ mpi3mr_delete_sas_phy(mrioc, mr_sas_port,
+ mr_sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * mpi3mr_sas_port_sanity_check - sanity check while adding port
+ * @mrioc: Adapter instance reference
+ * @mr_sas_node: Internal sas node object (expander or host)
+ * @sas_address: SAS address of device/expander
+ * @hba_port: HBA port entry
+ *
+ * Verifies whether the phys attached to a device with the given
+ * SAS address already belong to an existing sas port; if so,
+ * removes those phys from the sas port.
+ *
+ * Return: None.
+ */
+static void mpi3mr_sas_port_sanity_check(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *mr_sas_node, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ sas_address) || (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ if (mr_sas_node->phy[i].phy_belongs_to_port == 1)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ mr_sas_node, &mr_sas_node->phy[i]);
+ }
+}
+
+/**
+ * mpi3mr_set_identify - set identify for phys and end devices
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle
+ * @identify: SAS transport layer's identify info
+ *
+ * Populates sas identify info for a specific device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_set_identify(struct mpi3mr_ioc *mrioc, u16 handle,
+ struct sas_identify *identify)
+{
+ struct mpi3_device_page0 device_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ u16 device_info;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &device_pg0,
+ sizeof(device_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ sasinf = &device_pg0.device_specific.sas_sata_format;
+ device_info = le16_to_cpu(sasinf->device_info);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sasinf->sas_address);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sasinf->phy_num;
+
+ /* device_type */
+ switch (device_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) {
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ /* MPI3.0 doesn't have a define for SATA INIT, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+
+ /* target_port_protocols */
+ if (device_info & MPI3_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ /* MPI3.0 doesn't have a define for STP Target, so set both here */
+ if (device_info & MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET)
+ identify->target_port_protocols |= (SAS_PROTOCOL_STP |
+ SAS_PROTOCOL_SATA);
+ if (device_info & MPI3_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_host_phy - report sas_host phy to SAS transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @phy_pg0: SAS phy page 0
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_host_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy, struct mpi3_sas_phy_page0 phy_pg0,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle = le16_to_cpu(phy_pg0.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ phy_pg0.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_add_expander_phy - report expander phy to transport
+ * @mrioc: Adapter instance reference
+ * @mr_sas_phy: Internal Phy object
+ * @expander_pg1: SAS Expander page 1
+ * @parent_dev: Parent device class object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_add_expander_phy(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_phy *mr_sas_phy,
+ struct mpi3_sas_expander_page1 expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mr_sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mr_sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((mpi3mr_set_identify(mrioc, mr_sas_phy->handle,
+ &mr_sas_phy->identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mr_sas_phy->identify;
+ mr_sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.attached_dev_handle);
+ if (mr_sas_phy->attached_handle)
+ mpi3mr_set_identify(mrioc, mr_sas_phy->attached_handle,
+ &mr_sas_phy->remote_identify);
+ phy->identify.phy_identifier = mr_sas_phy->phy_id;
+ phy->negotiated_linkrate = mpi3mr_convert_phy_link_rate(
+ (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ phy->minimum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate & MPI3_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = mpi3mr_convert_phy_link_rate(
+ expander_pg1.hw_link_rate >> 4);
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate & MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ expander_pg1.programmed_link_rate >> 4);
+ phy->hostdata = mr_sas_phy->hba_port;
+
+ if ((sas_phy_add(phy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&phy->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ mr_sas_phy->handle, (unsigned long long)
+ mr_sas_phy->identify.sas_address,
+ mr_sas_phy->attached_handle,
+ (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+ mr_sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpi3mr_alloc_hba_port - alloc hba port object
+ * @mrioc: Adapter instance reference
+ * @port_id: Port number
+ *
+ * Allocates memory for an hba port object.
+ *
+ * Return: mpi3mr_hba_port reference or NULL.
+ */
+static struct mpi3mr_hba_port *
+mpi3mr_alloc_hba_port(struct mpi3mr_ioc *mrioc, u16 port_id)
+{
+ struct mpi3mr_hba_port *hba_port;
+
+ hba_port = kzalloc(sizeof(struct mpi3mr_hba_port),
+ GFP_KERNEL);
+ if (!hba_port)
+ return NULL;
+ hba_port->port_id = port_id;
+ ioc_info(mrioc, "hba_port entry: %p, port: %d is added to hba_port list\n",
+ hba_port, hba_port->port_id);
+ list_add_tail(&hba_port->list, &mrioc->hba_port_table_list);
+ return hba_port;
+}
+
+/**
+ * mpi3mr_get_hba_port_by_id - find hba port by id
+ * @mrioc: Adapter instance reference
+ * @port_id: Port ID to search
+ *
+ * Return: mpi3mr_hba_port reference for the matched port
+ */
+struct mpi3mr_hba_port *mpi3mr_get_hba_port_by_id(struct mpi3mr_ioc *mrioc,
+ u8 port_id)
+{
+ struct mpi3mr_hba_port *port, *port_next;
+
+ list_for_each_entry_safe(port, port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (port->port_id != port_id)
+ continue;
+ if (port->flags & MPI3MR_HBA_PORT_FLAG_DIRTY)
+ continue;
+ return port;
+ }
+
+ return NULL;
+}
+
+/**
+ * mpi3mr_update_links - refreshing SAS phy link changes
+ * @mrioc: Adapter instance reference
+ * @sas_address_parent: SAS address of parent expander or host
+ * @handle: Firmware device handle of attached device
+ * @phy_number: Phy number
+ * @link_rate: New link rate
+ * @hba_port: HBA port entry
+ *
+ * Return: None.
+ */
+void mpi3mr_update_links(struct mpi3mr_ioc *mrioc,
+ u64 sas_address_parent, u16 handle, u8 phy_number, u8 link_rate,
+ struct mpi3mr_hba_port *hba_port)
+{
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct mpi3mr_sas_phy *mr_sas_phy;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ mr_sas_phy = &mr_sas_node->phy[phy_number];
+ mr_sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI3_SAS_NEG_LINK_RATE_1_5)) {
+ mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_phy->remote_identify);
+ mpi3mr_add_phy_to_an_existing_port(mrioc, mr_sas_node,
+ mr_sas_phy, mr_sas_phy->remote_identify.sas_address,
+ hba_port);
+ } else
+ memset(&mr_sas_phy->remote_identify, 0, sizeof(struct
+ sas_identify));
+
+ if (mr_sas_phy->phy)
+ mr_sas_phy->phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(link_rate);
+
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_phy->phy->dev,
+ "refresh: parent sas_address(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_address(0x%016llx)\n",
+ (unsigned long long)sas_address_parent,
+ link_rate, phy_number, handle, (unsigned long long)
+ mr_sas_phy->remote_identify.sas_address);
+}
+
+/**
+ * mpi3mr_sas_host_refresh - refreshing sas host object contents
+ * @mrioc: Adapter instance reference
+ *
+ * This function refreshes the controller's phy information and
+ * updates the SAS transport layer with the updated information;
+ * this is executed for each device addition or device info
+ * change event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_refresh(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u8 link_rate;
+ u16 sz, port_id, attached_handle;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+
+ dprint_transport_info(mrioc,
+ "updating handles for sas_host(0x%016llx)\n",
+ (unsigned long long)mrioc->sas_hba.sas_address);
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ link_rate =
+ sas_io_unit_pg0->phy_data[i].negotiated_link_rate >> 4;
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (attached_handle && link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_update_links(mrioc, mrioc->sas_hba.sas_address,
+ attached_handle, i, link_rate,
+ mrioc->sas_hba.phy[i].hba_port);
+ }
+ out:
+ kfree(sas_io_unit_pg0);
+}
+
+/**
+ * mpi3mr_sas_host_add - create sas host object
+ * @mrioc: Adapter instance reference
+ *
+ * This function creates the controller's phy information and
+ * updates the SAS transport layer with the updated information;
+ * this is executed for the first device addition or device info
+ * change event.
+ *
+ * Return: None.
+ */
+void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ u16 sz, num_phys = 1, port_id, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_enclosure_page0 encl_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ num_phys = sas_io_unit_pg0->num_phys;
+ kfree(sas_io_unit_pg0);
+
+ mrioc->sas_hba.host_node = 1;
+ INIT_LIST_HEAD(&mrioc->sas_hba.sas_port_list);
+ mrioc->sas_hba.parent_dev = &mrioc->shost->shost_gendev;
+ mrioc->sas_hba.phy = kcalloc(num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!mrioc->sas_hba.phy)
+ return;
+
+ mrioc->sas_hba.num_phys = num_phys;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (num_phys * sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ mrioc->sas_hba.handle = 0;
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ if (sas_io_unit_pg0->phy_data[i].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))
+ continue;
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, i)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (!mrioc->sas_hba.handle)
+ mrioc->sas_hba.handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].controller_dev_handle);
+ port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+
+ if (!(mpi3mr_get_hba_port_by_id(mrioc, port_id)))
+ if (!mpi3mr_alloc_hba_port(mrioc, port_id))
+ goto out;
+
+ mrioc->sas_hba.phy[i].handle = mrioc->sas_hba.handle;
+ mrioc->sas_hba.phy[i].phy_id = i;
+ mrioc->sas_hba.phy[i].hba_port =
+ mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ mpi3mr_add_host_phy(mrioc, &mrioc->sas_hba.phy[i],
+ phy_pg0, mrioc->sas_hba.parent_dev);
+ }
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.handle))) {
+ ioc_err(mrioc, "%s: device page0 read failed\n", __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "device page read failed for handle(0x%04x), with ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ mrioc->sas_hba.handle, ioc_status, __FILE__, __LINE__,
+ __func__);
+ goto out;
+ }
+ mrioc->sas_hba.enclosure_handle =
+ le16_to_cpu(dev_pg0.enclosure_handle);
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+ mrioc->sas_hba.sas_address =
+ le64_to_cpu(sasinf->sas_address);
+ ioc_info(mrioc,
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ mrioc->sas_hba.handle,
+ (unsigned long long) mrioc->sas_hba.sas_address,
+ mrioc->sas_hba.num_phys);
+
+ if (mrioc->sas_hba.enclosure_handle) {
+ if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status,
+ &encl_pg0, sizeof(dev_pg0),
+ MPI3_ENCLOS_PGAD_FORM_HANDLE,
+ mrioc->sas_hba.enclosure_handle)) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS))
+ mrioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(encl_pg0.enclosure_logical_id);
+ }
+
+out:
+ kfree(sas_io_unit_pg0);
+}
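+
+/*
+ * Note the two-pass page read above: sas_io_unit page0 is first
+ * fetched with room for a single phy_data element only to learn
+ * num_phys, freed, and then re-read at full size once the phy
+ * count is known.
+ */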
+
+/**
+ * mpi3mr_sas_port_add - Expose the SAS device to the SAS TL
+ * @mrioc: Adapter instance reference
+ * @handle: Firmware device handle of the attached device
+ * @sas_address_parent: sas address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * This function creates a new sas port object for the end
+ * device matching the given sas address and hba_port, adds it
+ * to the sas_node's sas_port_list, and exposes the attached sas
+ * device to the SAS transport layer through sas_rphy_add.
+ *
+ * Return: A valid mpi3mr_sas_port reference on success or NULL.
+ */
+static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc,
+ u16 handle, u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy, *next;
+ struct mpi3mr_sas_port *mr_sas_port;
+ unsigned long flags;
+ struct mpi3mr_sas_node *mr_sas_node;
+ struct sas_rphy *rphy;
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ int i;
+ struct sas_port *port;
+
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ mr_sas_port = kzalloc(sizeof(struct mpi3mr_sas_port), GFP_KERNEL);
+ if (!mr_sas_port)
+ return NULL;
+
+ INIT_LIST_HEAD(&mr_sas_port->port_list);
+ INIT_LIST_HEAD(&mr_sas_port->phy_list);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!mr_sas_node) {
+ ioc_err(mrioc, "%s:could not find parent sas_address(0x%016llx)!\n",
+ __func__, (unsigned long long)sas_address_parent);
+ goto out_fail;
+ }
+
+ if ((mpi3mr_set_identify(mrioc, handle,
+ &mr_sas_port->remote_identify))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mr_sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ mr_sas_port->hba_port = hba_port;
+ mpi3mr_sas_port_sanity_check(mrioc, mr_sas_node,
+ mr_sas_port->remote_identify.sas_address, hba_port);
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if ((mr_sas_node->phy[i].remote_identify.sas_address !=
+ mr_sas_port->remote_identify.sas_address) ||
+ (mr_sas_node->phy[i].hba_port != hba_port))
+ continue;
+ list_add_tail(&mr_sas_node->phy[i].port_siblings,
+ &mr_sas_port->phy_list);
+ mr_sas_port->num_phys++;
+ mr_sas_port->phy_mask |= (1 << i);
+ }
+
+ if (!mr_sas_port->num_phys) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ mr_sas_port->lowest_phy = ffs(mr_sas_port->phy_mask) - 1;
+
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev = mpi3mr_get_tgtdev_by_addr(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+
+ if (!tgtdev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 1;
+ }
+
+ if (!mr_sas_node->parent_dev) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(mr_sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mr_sas_phy, &mr_sas_port->phy_list,
+ port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&port->dev,
+ "add: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ sas_port_add_phy(port, mr_sas_phy->phy);
+ mr_sas_phy->phy_belongs_to_port = 1;
+ mr_sas_phy->hba_port = hba_port;
+ }
+
+ mr_sas_port->port = port;
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ rphy = sas_end_device_alloc(port);
+ tgtdev->dev_spec.sas_sata_inf.rphy = rphy;
+ } else {
+ rphy = sas_expander_alloc(port,
+ mr_sas_port->remote_identify.device_type);
+ }
+ rphy->identify = mr_sas_port->remote_identify;
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ if ((sas_rphy_add(rphy))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ }
+ if (mr_sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+ tgtdev->dev_spec.sas_sata_inf.pend_sas_rphy_add = 0;
+ tgtdev->dev_spec.sas_sata_inf.sas_transport_attached = 1;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+
+ dev_info(&rphy->dev,
+ "%s: added: handle(0x%04x), sas_address(0x%016llx)\n",
+ __func__, handle, (unsigned long long)
+ mr_sas_port->remote_identify.sas_address);
+
+ mr_sas_port->rphy = rphy;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&mr_sas_port->port_list, &mr_sas_node->sas_port_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, true);
+ }
+
+ /* fill in report manufacture */
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_report_manufacture(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy), hba_port->port_id);
+
+ return mr_sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mr_sas_phy, next, &mr_sas_port->phy_list,
+ port_siblings)
+ list_del(&mr_sas_phy->port_siblings);
+ kfree(mr_sas_port);
+ return NULL;
+}
+
+/**
+ * mpi3mr_sas_port_remove - remove port from the list
+ * @mrioc: Adapter instance reference
+ * @sas_address: SAS address of attached device
+ * @sas_address_parent: SAS address of parent expander or host
+ * @hba_port: HBA port entry
+ *
+ * Removes the matching port object from the sas_port_list and
+ * frees the associated memory.
+ *
+ * Return: None
+ */
+static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ u64 sas_address_parent, struct mpi3mr_hba_port *hba_port)
+{
+ int i;
+ unsigned long flags;
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ struct mpi3mr_sas_node *mr_sas_node;
+ u8 found = 0;
+ struct mpi3mr_sas_phy *mr_sas_phy, *next_phy;
+ struct mpi3mr_hba_port *srch_port, *hba_port_next = NULL;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ mr_sas_node = __mpi3mr_sas_node_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ if (!mr_sas_node) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mr_sas_port, next, &mr_sas_node->sas_port_list,
+ port_list) {
+ if (mr_sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ if (mr_sas_port->hba_port != hba_port)
+ continue;
+ found = 1;
+ list_del(&mr_sas_port->port_list);
+ goto out;
+ }
+
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ return;
+ }
+
+ if (mr_sas_node->host_node) {
+ list_for_each_entry_safe(srch_port, hba_port_next,
+ &mrioc->hba_port_table_list, list) {
+ if (srch_port != hba_port)
+ continue;
+ ioc_info(mrioc,
+ "removing hba_port entry: %p port: %d from hba_port list\n",
+ srch_port, srch_port->port_id);
+ list_del(&hba_port->list);
+ kfree(hba_port);
+ break;
+ }
+ }
+
+ for (i = 0; i < mr_sas_node->num_phys; i++) {
+ if (mr_sas_node->phy[i].remote_identify.sas_address ==
+ sas_address)
+ memset(&mr_sas_node->phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (mrioc->current_event)
+ mrioc->current_event->pending_at_sml = 1;
+
+ list_for_each_entry_safe(mr_sas_phy, next_phy,
+ &mr_sas_port->phy_list, port_siblings) {
+ if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ dev_info(&mr_sas_port->port->dev,
+ "remove: sas_address(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_phy->phy_id);
+ mr_sas_phy->phy_belongs_to_port = 0;
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete_phy(mr_sas_port->port,
+ mr_sas_phy->phy);
+ list_del(&mr_sas_phy->port_siblings);
+ }
+ if (!mrioc->stop_drv_processing)
+ sas_port_delete(mr_sas_port->port);
+ ioc_info(mrioc, "%s: removed sas_address(0x%016llx)\n",
+ __func__, (unsigned long long)sas_address);
+
+ if (mrioc->current_event) {
+ mrioc->current_event->pending_at_sml = 0;
+ if (mrioc->current_event->discard)
+ mpi3mr_print_device_event_notice(mrioc, false);
+ }
+
+ kfree(mr_sas_port);
+}
+
+/**
+ * struct host_port - host port details
+ * @sas_address: SAS Address of the attached device
+ * @phy_mask: phy mask of host port
+ * @handle: Device Handle of attached device
+ * @iounit_port_id: port ID
+ * @used: Flag set once the host port is matched with a sas port from sas_port_list
+ * @lowest_phy: lowest phy ID of host port
+ */
+struct host_port {
+ u64 sas_address;
+ u32 phy_mask;
+ u16 handle;
+ u8 iounit_port_id;
+ u8 used;
+ u8 lowest_phy;
+};
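+
+/*
+ * Example encoding with hypothetical values: a wide port built from
+ * host phys 4-7 would be described as
+ *
+ * struct host_port hp = {
+ * .phy_mask = 0xf0, (bits 4 through 7 set)
+ * .lowest_phy = 4,
+ * };
+ *
+ * with sas_address, handle and iounit_port_id filled from the attached
+ * device's page0 data.
+ */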
+
+/**
+ * mpi3mr_update_mr_sas_port - update sas port objects during reset
+ * @mrioc: Adapter instance reference
+ * @h_port: host_port object
+ * @mr_sas_port: sas_port objects which needs to be updated
+ *
+ * Updates the port ID of the sas port object. Also adds phys that
+ * were newly added to the current sas port and removes phys that
+ * moved out of the current sas port.
+ *
+ * Return: Nothing.
+ */
+static void
+mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
+ struct mpi3mr_sas_port *mr_sas_port)
+{
+ struct mpi3mr_sas_phy *mr_sas_phy;
+ u32 phy_mask_xor;
+ u64 phys_to_be_added, phys_to_be_removed;
+ int i;
+
+ h_port->used = 1;
+ mr_sas_port->marked_responding = 1;
+
+ dev_info(&mr_sas_port->port->dev,
+ "sas_address(0x%016llx), old: port_id %d phy_mask 0x%x, new: port_id %d phy_mask:0x%x\n",
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port->port_id, mr_sas_port->phy_mask,
+ h_port->iounit_port_id, h_port->phy_mask);
+
+ mr_sas_port->hba_port->port_id = h_port->iounit_port_id;
+ mr_sas_port->hba_port->flags &= ~MPI3MR_HBA_PORT_FLAG_DIRTY;
+
+ /* Get the newly added phys bit map & removed phys bit map */
+ phy_mask_xor = mr_sas_port->phy_mask ^ h_port->phy_mask;
+ phys_to_be_added = h_port->phy_mask & phy_mask_xor;
+ phys_to_be_removed = mr_sas_port->phy_mask & phy_mask_xor;
+
+ /*
+ * Register the newly added phys with the current mr_sas_port's
+ * port. If a phy was previously registered with another port,
+ * delete it from that port first.
+ */
+ for_each_set_bit(i, (ulong *) &phys_to_be_added, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ mpi3mr_add_phy_to_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ /* Delete the phys which are not part of current mr_sas_port's port. */
+ for_each_set_bit(i, (ulong *) &phys_to_be_removed, BITS_PER_TYPE(u32)) {
+ mr_sas_phy = &mrioc->sas_hba.phy[i];
+ if (mr_sas_phy->phy_belongs_to_port)
+ mpi3mr_del_phy_from_an_existing_port(mrioc,
+ &mrioc->sas_hba, mr_sas_phy);
+ }
+}
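+
+/*
+ * A worked example of the mask arithmetic above, with hypothetical
+ * values: if the port owned phys 0 and 1 before reset (phy_mask 0x3)
+ * and owns phys 1 and 2 after it (h_port->phy_mask 0x6), then
+ *
+ * phy_mask_xor = 0x3 ^ 0x6 = 0x5
+ * phys_to_be_added = 0x6 & 0x5 = 0x4 (phy 2 joins the port)
+ * phys_to_be_removed = 0x3 & 0x5 = 0x1 (phy 0 leaves the port)
+ */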
+
+/**
+ * mpi3mr_refresh_sas_ports - update host's sas ports during reset
+ * @mrioc: Adapter instance reference
+ *
+ * Updates the host's sas ports during reset by checking whether
+ * each sas port is still intact. Adds/removes phys if any hba
+ * phys moved into or out of a sas port, and updates the
+ * io_unit_port if it changed during reset.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
+{
+ struct host_port h_port[32];
+ int i, j, found, host_port_count = 0, port_idx;
+ u16 sz, attached_handle, ioc_status;
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_device_page0 dev_pg0;
+ struct mpi3_device0_sas_sata_format *sasinf;
+ struct mpi3mr_sas_port *mr_sas_port;
+
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0)
+ return;
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* Build the new host port table */
+ for (i = 0; i < mrioc->sas_hba.num_phys; i++) {
+ attached_handle = le16_to_cpu(
+ sas_io_unit_pg0->phy_data[i].attached_dev_handle);
+ if (!attached_handle)
+ continue;
+ found = 0;
+ for (j = 0; j < host_port_count; j++) {
+ if (h_port[j].handle == attached_handle) {
+ h_port[j].phy_mask |= (1 << i);
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ continue;
+ if ((mpi3mr_cfg_get_dev_pg0(mrioc, &ioc_status, &dev_pg0,
+ sizeof(dev_pg0), MPI3_DEVICE_PGAD_FORM_HANDLE,
+ attached_handle))) {
+ dprint_reset(mrioc,
+ "failed to read dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ attached_handle, __FILE__, __LINE__, __func__);
+ continue;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading dev_pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ ioc_status, attached_handle,
+ __FILE__, __LINE__, __func__);
+ continue;
+ }
+ sasinf = &dev_pg0.device_specific.sas_sata_format;
+
+ port_idx = host_port_count;
+ h_port[port_idx].sas_address = le64_to_cpu(sasinf->sas_address);
+ h_port[port_idx].handle = attached_handle;
+ h_port[port_idx].phy_mask = (1 << i);
+ h_port[port_idx].iounit_port_id = sas_io_unit_pg0->phy_data[i].io_unit_port;
+ h_port[port_idx].lowest_phy = sasinf->phy_num;
+ h_port[port_idx].used = 0;
+ host_port_count++;
+ }
+
+ if (!host_port_count)
+ goto out;
+
+ if (mrioc->logging_level & MPI3_DEBUG_RESET) {
+ ioc_info(mrioc, "Host port details before reset\n");
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ mr_sas_port->hba_port->port_id,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->phy_mask, mr_sas_port->lowest_phy);
+ }
+ mr_sas_port = NULL;
+ ioc_info(mrioc, "Host port details after reset\n");
+ for (i = 0; i < host_port_count; i++) {
+ ioc_info(mrioc,
+ "port_id:%d, sas_address:(0x%016llx), phy_mask:(0x%x), lowest phy id:%d\n",
+ h_port[i].iounit_port_id, h_port[i].sas_address,
+ h_port[i].phy_mask, h_port[i].lowest_phy);
+ }
+ }
+
+ /* mark all host sas port entries as dirty */
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ mr_sas_port->marked_responding = 0;
+ mr_sas_port->hba_port->flags |= MPI3MR_HBA_PORT_FLAG_DIRTY;
+ }
+
+ /* First check for matching lowest phy */
+ for (i = 0; i < host_port_count; i++) {
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].lowest_phy == mr_sas_port->lowest_phy) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+ /* In case the lowest phy was enabled or disabled during reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ if (h_port[i].phy_mask & mr_sas_port->phy_mask) {
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+ }
+
+ /* In case an expander cable was moved to another HBA port during reset */
+ for (i = 0; i < host_port_count; i++) {
+ if (h_port[i].used)
+ continue;
+ mr_sas_port = NULL;
+ list_for_each_entry(mr_sas_port, &mrioc->sas_hba.sas_port_list,
+ port_list) {
+ if (mr_sas_port->marked_responding)
+ continue;
+ if (h_port[i].sas_address != mr_sas_port->remote_identify.sas_address)
+ continue;
+ mpi3mr_update_mr_sas_port(mrioc, &h_port[i], mr_sas_port);
+ break;
+ }
+ }
+out:
+ kfree(sas_io_unit_pg0);
+}
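+
+/*
+ * A recap of the three matching passes above, with hypothetical
+ * values: a port seen as phy_mask 0xf/port_id 0 before reset that
+ * returns as phy_mask 0xe/port_id 1 fails the lowest-phy pass (phy 0
+ * is gone), matches in the phy-overlap pass, and
+ * mpi3mr_update_mr_sas_port() then drops phy 0 from the port and
+ * rewrites the port_id to 1.
+ */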
+
+/**
+ * mpi3mr_refresh_expanders - Refresh expander device exposure
+ * @mrioc: Adapter instance reference
+ *
+ * This is executed after a controller reset to remove from the
+ * upper layers any expander devices that went missing during the
+ * reset and to expose any newly detected expander devices to them.
+ *
+ * Return: Nothing.
+ */
+void
+mpi3mr_refresh_expanders(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ u16 ioc_status, handle;
+ u64 sas_address;
+ int i;
+ unsigned long flags;
+ struct mpi3mr_hba_port *hba_port;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list, list) {
+ sas_expander->non_responding = 1;
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ sas_expander = NULL;
+
+ handle = 0xffff;
+
+ /* Search for responding expander devices and add any that are newly discovered */
+ while (true) {
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(struct mpi3_sas_expander_page0),
+ MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ dprint_reset(mrioc,
+ "failed to read exp pg0 for handle(0x%04x) at %s:%d/%s()!\n",
+ handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_reset(mrioc,
+ "ioc_status(0x%x) while reading exp pg0 for handle:(0x%04x), %s:%d/%s()!\n",
+ ioc_status, handle, __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ handle = le16_to_cpu(expander_pg0.dev_handle);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, expander_pg0.io_unit_port);
+
+ if (!hba_port) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (!sas_expander) {
+ mpi3mr_sas_host_refresh(mrioc);
+ mpi3mr_expander_add(mrioc, handle);
+ continue;
+ }
+
+ sas_expander->non_responding = 0;
+ if (sas_expander->handle == handle)
+ continue;
+
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ }
+
+ /*
+ * Delete non responding expander devices and the corresponding
+ * hba_port if the non responding expander device's parent device
+ * is a host node.
+ */
+ sas_expander = NULL;
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
+ &mrioc->sas_expander_list, list) {
+ if (sas_expander->non_responding) {
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_node_add - insert an expander to the list.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander sas node
+ * Context: This function will acquire sas_node_lock.
+ *
+ * Adds the new object to the mrioc->sas_expander_list.
+ *
+ * Return: None.
+ */
+static void mpi3mr_expander_node_add(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &mrioc->sas_expander_list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+}
+
+/**
+ * mpi3mr_expander_add - Create expander object
+ * @mrioc: Adapter instance reference
+ * @handle: Expander firmware device handle
+ *
+ * This function creates an expander object, stores it in the
+ * sas_expander_list, and exposes it to the SAS transport
+ * layer.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_enclosure_node *enclosure_dev;
+ struct mpi3_sas_expander_page0 expander_pg0;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ u16 ioc_status, parent_handle, temp_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+ u8 port_id, link_rate;
+ struct mpi3mr_sas_port *mr_sas_port = NULL;
+ struct mpi3mr_hba_port *hba_port;
+ u32 phynum_handle;
+ int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (mrioc->reset_in_progress)
+ return -1;
+
+ if ((mpi3mr_cfg_get_sas_exp_pg0(mrioc, &ioc_status, &expander_pg0,
+ sizeof(expander_pg0), MPI3_SAS_EXPAND_PGAD_FORM_HANDLE, handle))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ parent_handle = le16_to_cpu(expander_pg0.parent_dev_handle);
+ if (mpi3mr_get_sas_address(mrioc, parent_handle, &sas_address_parent)
+ != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ port_id = expander_pg0.io_unit_port;
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ if (sas_address_parent != mrioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander =
+ mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address_parent, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = mpi3mr_expander_add(mrioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ } else {
+ /*
+ * When a parent expander is present, update its phys to
+ * which the child expander is connected with the link
+ * speed, attached dev handle and sas address.
+ */
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle =
+ (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ parent_handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc,
+ &ioc_status, &expander_pg1,
+ sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ return rc;
+ }
+ temp_handle = le16_to_cpu(
+ expander_pg1.attached_dev_handle);
+ if (temp_handle != handle)
+ continue;
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ mpi3mr_update_links(mrioc, sas_address_parent,
+ handle, i, link_rate, hba_port);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.sas_address);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc,
+ sas_address, hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node),
+ GFP_KERNEL);
+ if (!sas_expander)
+ return -1;
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.num_phys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+ sas_expander->hba_port = hba_port;
+
+ ioc_info(mrioc,
+ "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys) {
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct mpi3mr_sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mr_sas_port = mpi3mr_sas_port_add(mrioc, handle, sas_address_parent,
+ sas_expander->hba_port);
+ if (!mr_sas_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mr_sas_port->rphy->dev;
+ sas_expander->rphy = mr_sas_port->rphy;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ phynum_handle = (i << MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
+ handle;
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+ sas_expander->phy[i].hba_port = hba_port;
+
+ if ((mpi3mr_add_expander_phy(mrioc, &sas_expander->phy[i],
+ expander_pg1, sas_expander->parent_dev))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ enclosure_dev =
+ mpi3mr_enclosure_find_by_handle(mrioc,
+ sas_expander->enclosure_handle);
+ if (enclosure_dev)
+ sas_expander->enclosure_logical_id = le64_to_cpu(
+ enclosure_dev->pg0.enclosure_logical_id);
+ }
+
+ mpi3mr_expander_node_add(mrioc, sas_expander);
+ return 0;
+
+out_fail:
+
+ if (mr_sas_port)
+ mpi3mr_sas_port_remove(mrioc,
+ sas_expander->sas_address,
+ sas_address_parent, sas_expander->hba_port);
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+ return rc;
+}
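+
+/*
+ * Discovery-order sketch for a hypothetical cascade of
+ * host -> expander A -> expander B: a call to add B while A is still
+ * unknown first recurses through mpi3mr_expander_add() on the parent
+ * handle (expander A), so parent objects and their sas ports always
+ * exist before a child expander is exposed.
+ */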
+
+/**
+ * mpi3mr_expander_node_remove - recursive removal of expander.
+ * @mrioc: Adapter instance reference
+ * @sas_expander: Expander device object
+ *
+ * Removes the expander object from the sas_expander_list and
+ * from the SAS TL, freeing the associated memory. If one of the
+ * attached devices is an expander, it is removed recursively as
+ * well.
+ *
+ * Return: Nothing.
+ */
+static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_sas_node *sas_expander)
+{
+ struct mpi3mr_sas_port *mr_sas_port, *next;
+ unsigned long flags;
+ u8 port_id;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mr_sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (mrioc->reset_in_progress)
+ return;
+ if (mr_sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpi3mr_remove_device_by_sas_address(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ else if (mr_sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mr_sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpi3mr_expander_remove(mrioc,
+ mr_sas_port->remote_identify.sas_address,
+ mr_sas_port->hba_port);
+ }
+
+ port_id = sas_expander->hba_port->port_id;
+ mpi3mr_sas_port_remove(mrioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent, sas_expander->hba_port);
+
+ ioc_info(mrioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address, port_id);
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * mpi3mr_expander_remove - Remove expander object
+ * @mrioc: Adapter instance reference
+ * @sas_address: Remove expander sas_address
+ * @hba_port: HBA port reference
+ *
+ * This function looks up the expander object stored in
+ * mrioc->sas_expander_list and removes it from the SAS TL by
+ * calling mpi3mr_expander_node_remove().
+ *
+ * Return: None
+ */
+void mpi3mr_expander_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ struct mpi3mr_hba_port *hba_port)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ unsigned long flags;
+
+ if (mrioc->reset_in_progress)
+ return;
+
+ if (!hba_port)
+ return;
+
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ sas_expander = mpi3mr_expander_find_by_sas_address(mrioc, sas_address,
+ hba_port);
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ if (sas_expander)
+ mpi3mr_expander_node_remove(mrioc, sas_expander);
+}
+
+/**
+ * mpi3mr_get_sas_negotiated_logical_linkrate - get linkrate
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function identifies whether the target device is
+ * attached directly or through an expander, issues sas phy
+ * page0 or expander phy page1 accordingly, and gets the link
+ * rate. If reading either page fails, a link rate of 1.5 Gb/s
+ * is returned.
+ *
+ * Return: logical link rate.
+ */
+static u8 mpi3mr_get_sas_negotiated_logical_linkrate(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u8 link_rate = MPI3_SAS_NEG_LINK_RATE_1_5, phy_number;
+ struct mpi3_sas_expander_page1 expander_pg1;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u32 phynum_handle;
+ u16 ioc_status;
+
+ phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ if (!(tgtdev->devpg0_flag & MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)) {
+ phynum_handle = ((phy_number<<MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT)
+ | tgtdev->parent_handle);
+ if (mpi3mr_cfg_get_sas_exp_pg1(mrioc, &ioc_status,
+ &expander_pg1, sizeof(expander_pg1),
+ MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM,
+ phynum_handle)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (expander_pg1.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy_number)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ link_rate = (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+out:
+ return link_rate;
+}
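+
+/*
+ * Decode example with a hypothetical raw value, assuming the usual
+ * MPI3 logical-rate mask (0xf0) and shift (4): with
+ * negotiated_link_rate = 0xa9,
+ *
+ * (0xa9 & MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
+ * MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT
+ *
+ * yields the logical rate 0xa, while the lower nibble (0x9) holds the
+ * physical rate.
+ */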
+
+/**
+ * mpi3mr_report_tgtdev_to_sas_transport - expose dev to SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function exposes the target device to the SAS transport
+ * layer after preparing the host phy and setting up the link rate.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ int retval = 0;
+ u8 link_rate, parent_phy_number;
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+ u8 port_id;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return -1;
+
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ if (!mrioc->sas_hba.num_phys)
+ mpi3mr_sas_host_add(mrioc);
+ else
+ mpi3mr_sas_host_refresh(mrioc);
+
+ if (mpi3mr_get_sas_address(mrioc, tgtdev->parent_handle,
+ &sas_address_parent) != 0) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.sas_address_parent = sas_address_parent;
+
+ parent_phy_number = tgtdev->dev_spec.sas_sata_inf.phy_id;
+ port_id = tgtdev->io_unit_port;
+
+ hba_port = mpi3mr_get_hba_port_by_id(mrioc, port_id);
+ if (!hba_port) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ tgtdev->dev_spec.sas_sata_inf.hba_port = hba_port;
+
+ link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev);
+
+ mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle,
+ parent_phy_number, link_rate, hba_port);
+
+ tgtdev->host_exposed = 1;
+ if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
+ sas_address_parent, hba_port)) {
+ tgtdev->host_exposed = 0;
+ retval = -1;
+ } else if ((!tgtdev->starget)) {
+ if (!mrioc->is_driver_loading)
+ mpi3mr_sas_port_remove(mrioc, sas_address,
+ sas_address_parent, hba_port);
+ tgtdev->host_exposed = 0;
+ retval = -1;
+ }
+ return retval;
+}
+
+/**
+ * mpi3mr_remove_tgtdev_from_sas_transport - remove from SAS TL
+ * @mrioc: Adapter instance reference
+ * @tgtdev: Target device
+ *
+ * This function removes the target device from the SAS transport layer.
+ *
+ * Return: None.
+ */
+void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_tgt_dev *tgtdev)
+{
+ u64 sas_address_parent, sas_address;
+ struct mpi3mr_hba_port *hba_port;
+
+ if ((tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) ||
+ !mrioc->sas_transport_enabled)
+ return;
+
+ hba_port = tgtdev->dev_spec.sas_sata_inf.hba_port;
+ sas_address = tgtdev->dev_spec.sas_sata_inf.sas_address;
+ sas_address_parent = tgtdev->dev_spec.sas_sata_inf.sas_address_parent;
+ mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
+ hba_port);
+ tgtdev->host_exposed = 0;
+}
+
+/**
+ * mpi3mr_get_port_id_by_sas_phy - Get port ID of the given phy
+ * @phy: SAS transport layer phy object
+ *
+ * Return: Port number for a valid ID else 0xFF
+ */
+static inline u8 mpi3mr_get_port_id_by_sas_phy(struct sas_phy *phy)
+{
+ u8 port_id = 0xFF;
+ struct mpi3mr_hba_port *hba_port = phy->hostdata;
+
+ if (hba_port)
+ port_id = hba_port->port_id;
+
+ return port_id;
+}
+
+/**
+ * mpi3mr_get_port_id_by_rphy - Get port number from SAS rphy
+ * @mrioc: Adapter instance reference
+ * @rphy: SAS transport layer remote phy object
+ *
+ * Retrieves the HBA port number to which the device pointed to
+ * by the rphy object is attached.
+ *
+ * Return: Valid port number on success else 0xFF.
+ */
+static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *rphy)
+{
+ struct mpi3mr_sas_node *sas_expander;
+ struct mpi3mr_tgt_dev *tgtdev;
+ unsigned long flags;
+ u8 port_id = 0xFF;
+
+ if (!rphy)
+ return port_id;
+
+ if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ spin_lock_irqsave(&mrioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &mrioc->sas_expander_list,
+ list) {
+ if (sas_expander->rphy == rphy) {
+ port_id = sas_expander->hba_port->port_id;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
+ } else if (rphy->identify.device_type == SAS_END_DEVICE) {
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ port_id =
+ tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
+ mpi3mr_tgtdev_put(tgtdev);
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ }
+ return port_id;
+}
+
+static inline struct mpi3mr_ioc *phy_to_mrioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+
+ return shost_priv(shost);
+}
+
+static inline struct mpi3mr_ioc *rphy_to_mrioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+
+ return shost_priv(shost);
+}
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
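+
+/*
+ * These frames follow the SMP REPORT PHY ERROR LOG function (0x11).
+ * The counters arrive big-endian on the wire, hence the __be32 fields
+ * and the be32_to_cpu() conversions in the caller below, e.g. with a
+ * hypothetical reply pointer:
+ *
+ * phy->invalid_dword_count = be32_to_cpu(reply->invalid_dword);
+ */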
+
+
+/**
+ * mpi3mr_get_expander_phy_error_log - return expander counters
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ *
+ * Return: 0 for success, non-zero for failure.
+ *
+ */
+static int mpi3mr_get_expander_phy_error_log(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma, data_in_dma;
+ u32 data_out_sz, data_in_sz, sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_error_log_request);
+ data_in_sz = sizeof(struct phy_error_log_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_error_log_reply = data_out + data_out_sz;
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy error log SMP request to sas_address(0x%016llx), phy_id(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy error log - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy error log - function_result(%d)\n",
+ phy_error_log_reply->function_result);
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ }
+
+out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_linkerrors - return phy error counters
+ * @phy: The SAS transport layer phy object
+ *
+ * This function retrieves the phy error log information of the
+ * HBA or expander to which the phy belongs.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int mpi3mr_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_phy_page1 phy_pg1;
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_get_expander_phy_error_log(mrioc, phy);
+
+ memset(&phy_pg1, 0, sizeof(struct mpi3_sas_phy_page1));
+ /* get hba phy error logs */
+ if ((mpi3mr_cfg_get_sas_phy_pg1(mrioc, &ioc_status, &phy_pg1,
+ sizeof(struct mpi3_sas_phy_page1),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.invalid_dword_count);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.running_disparity_error_count);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.loss_dword_synch_count);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.phy_reset_problem_count);
+ return 0;
+}
+
+/**
+ * mpi3mr_transport_get_enclosure_identifier - Get Enclosure ID
+ * @rphy: The SAS transport layer remote phy object
+ * @identifier: Enclosure identifier to be returned
+ *
+ * Returns the enclosure id for the device pointed to by the
+ * remote phy object.
+ *
+ * Return: 0 on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_enclosure_identifier(struct sas_rphy *rphy,
+ u64 *identifier)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ *identifier =
+ tgtdev->enclosure_logical_id;
+ rc = 0;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_get_bay_identifier - Get bay ID
+ * @rphy: The SAS transport layer remote phy object
+ *
+ * Returns the slot id for the device pointed to by the remote
+ * phy object.
+ *
+ * Return: Valid slot ID on success or -ENXIO
+ */
+static int
+mpi3mr_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = rphy_to_mrioc(rphy);
+ struct mpi3mr_tgt_dev *tgtdev = NULL;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ rphy->identify.sas_address, rphy);
+ if (tgtdev) {
+ rc = tgtdev->slot;
+ mpi3mr_tgtdev_put(tgtdev);
+ } else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x91 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
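+
+/*
+ * These opcodes are the standard SMP PHY CONTROL (function 0x91) phy
+ * operations. A minimal sketch of how a reset type is selected,
+ * mirroring mpi3mr_transport_phy_reset() below:
+ *
+ * u8 op = hard_reset ? SMP_PHY_CONTROL_HARD_RESET :
+ * SMP_PHY_CONTROL_LINK_RESET;
+ * rc = mpi3mr_expander_phy_control(mrioc, phy, op);
+ */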
+
+/**
+ * mpi3mr_expander_phy_control - expander phy control
+ * @mrioc: Adapter instance reference
+ * @phy: The SAS transport layer phy object
+ * @phy_operation: The phy operation to be executed
+ *
+ * Issues SMP passthru phy control request to execute a specific
+ * phy operation for a given expander device.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_expander_phy_control(struct mpi3mr_ioc *mrioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u16 ioc_status;
+ u16 sz;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ return -EFAULT;
+ }
+
+ data_out_sz = sizeof(struct phy_control_request);
+ data_in_sz = sizeof(struct phy_control_reply);
+ sz = data_out_sz + data_in_sz;
+ data_out = dma_alloc_coherent(&mrioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
+ if (!data_out) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + data_out_sz;
+ phy_control_reply = data_out + data_out_sz;
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_sas_phy(phy);
+ mpi_request.sas_address = cpu_to_le64(phy->identify.sas_address);
+
+ psge = &mpi_request.request_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_out_sz, data_out_dma);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, data_in_sz, data_in_dma);
+
+ dprint_transport_info(mrioc,
+ "sending phy control SMP request to sas_address(0x%016llx), phy_id(%d) opcode(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ phy_operation);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status))
+ goto out;
+
+ dprint_transport_info(mrioc,
+ "phy control SMP request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+
+ if (ioc_status == MPI3_IOCSTATUS_SUCCESS) {
+ dprint_transport_info(mrioc,
+ "phy control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ if (le16_to_cpu(mpi_reply.response_data_length) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+ dprint_transport_info(mrioc,
+ "phy control - function_result(%d)\n",
+ phy_control_reply->function_result);
+ rc = 0;
+ }
+ out:
+ if (data_out)
+ dma_free_coherent(&mrioc->pdev->dev, sz, data_out,
+ data_out_dma);
+
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_reset - Reset a given phy
+ * @phy: The SAS transport layer phy object
+ * @hard_reset: Flag to indicate the type of reset
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_iounit_control_request mpi_request;
+ struct mpi3_iounit_control_reply mpi_reply;
+ u16 request_sz = sizeof(struct mpi3_iounit_control_request);
+ u16 reply_sz = sizeof(struct mpi3_iounit_control_reply);
+ int rc = 0;
+ u16 ioc_status;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, request_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request.operation = MPI3_CTRL_OP_SAS_PHY_CONTROL;
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX] =
+ (hard_reset ? MPI3_CTRL_ACTION_HARD_RESET :
+ MPI3_CTRL_ACTION_LINK_RESET);
+ mpi_request.param8[MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX] =
+ phy->number;
+
+ dprint_transport_info(mrioc,
+ "sending phy reset request to sas_address(0x%016llx), phy_id(%d) hard_reset(%d)\n",
+ (unsigned long long)phy->identify.sas_address, phy->number,
+ hard_reset);
+
+ if (mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz, MPI3MR_INTADMCMD_TIMEOUT, &ioc_status)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ dprint_transport_info(mrioc,
+ "phy reset request completed with ioc_status(0x%04x)\n",
+ ioc_status);
+out:
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_enable - enable/disable phys
+ * @phy: The SAS transport layer phy object
+ * @enable: flag to enable/disable, enable phy when true
+ *
+ * This function enables/disables a given by executing required
+ * configuration page changes or expander phy control command
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ u16 sz;
+ int rc = 0;
+ int i, discovery_active;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address)
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page0, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit0_phy_data));
+ sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg0) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < mrioc->sas_hba.num_phys ; i++) {
+ if (sas_io_unit_pg0->phy_data[i].port_flags &
+ MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS) {
+ ioc_err(mrioc,
+ "discovery is active on port = %d, phy = %d\n"
+ "\tunable to enable/disable phys, try again later!\n",
+ sas_io_unit_pg0->phy_data[i].io_unit_port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if ((sas_io_unit_pg0->phy_data[phy->number].phy_flags &
+ (MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY |
+ MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY))) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ if (enable)
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ &= ~MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_io_unit_pg1->phy_data[phy->number].phy_flags
+ |= MPI3_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_io_unit_pg1);
+ kfree(sas_io_unit_pg0);
+ return rc;
+}
+
+/**
+ * mpi3mr_transport_phy_speed - set phy min/max speed
+ * @phy: The SAS transport layer phy object
+ * @rates: Rates defined as in sas_phy_linkrates
+ *
+ * This function applies the link rates given in the rates
+ * argument to the given phy by executing the required
+ * configuration page changes or an expander phy control command.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpi3mr_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct mpi3mr_ioc *mrioc = phy_to_mrioc(phy);
+ struct mpi3_sas_io_unit_page1 *sas_io_unit_pg1 = NULL;
+ struct mpi3_sas_phy_page0 phy_pg0;
+ u16 sz, ioc_status;
+ int rc = 0;
+
+ rc = mpi3mr_parent_present(mrioc, phy);
+ if (rc)
+ return rc;
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != mrioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return mpi3mr_expander_phy_control(mrioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+ sz = offsetof(struct mpi3_sas_io_unit_page1, phy_data) +
+ (mrioc->sas_hba.num_phys *
+ sizeof(struct mpi3_sas_io_unit1_phy_data));
+ sas_io_unit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_io_unit_pg1) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (mpi3mr_cfg_get_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ sas_io_unit_pg1->phy_data[phy->number].max_min_link_rate =
+ (rates->minimum_linkrate + (rates->maximum_linkrate << 4));
+
+ if (mpi3mr_cfg_set_sas_io_unit_pg1(mrioc, sas_io_unit_pg1, sz)) {
+ ioc_err(mrioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ mpi3mr_transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpi3mr_cfg_get_sas_phy_pg0(mrioc, &ioc_status, &phy_pg0,
+ sizeof(struct mpi3_sas_phy_page0),
+ MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER, phy->number) &&
+ (ioc_status == MPI3_IOCSTATUS_SUCCESS)) {
+ phy->minimum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate &
+ MPI3_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = mpi3mr_convert_phy_link_rate(
+ phy_pg0.programmed_link_rate >> 4);
+ phy->negotiated_linkrate =
+ mpi3mr_convert_phy_link_rate(
+ (phy_pg0.negotiated_link_rate &
+ MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK)
+ >> MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT);
+ }
+
+out:
+ kfree(sas_io_unit_pg1);
+ return rc;
+}
+
+/**
+ * mpi3mr_map_smp_buffer - map BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address holder
+ * @dma_len: Mapped DMA buffer length.
+ * @p: Virtual address holder
+ *
+ * This function maps the DMAable buffer
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int
+mpi3mr_map_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t *dma_addr, size_t *dma_len, void **p)
+{
+ /* Check if the request is split across multiple segments */
+ if (buf->sg_cnt > 1) {
+ *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr,
+ GFP_KERNEL);
+ if (!*p)
+ return -ENOMEM;
+ *dma_len = buf->payload_len;
+ } else {
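+ /* single segment: map it directly for streaming DMA */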
+ if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL))
+ return -ENOMEM;
+ *dma_addr = sg_dma_address(buf->sg_list);
+ *dma_len = sg_dma_len(buf->sg_list);
+ *p = NULL;
+ }
+
+ return 0;
+}
+
+/**
+ * mpi3mr_unmap_smp_buffer - unmap BSG dma buffer
+ * @dev: Generic device reference
+ * @buf: BSG buffer pointer
+ * @dma_addr: Physical address to be unmapped
+ * @p: Virtual address
+ *
+ * This function unmaps the DMAable buffer
+ */
+static void
+mpi3mr_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf,
+ dma_addr_t dma_addr, void *p)
+{
+ if (p)
+ dma_free_coherent(dev, buf->payload_len, p, dma_addr);
+ else
+ dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL);
+}
+
+/**
+ * mpi3mr_transport_smp_handler - handler for smp passthru
+ * @job: BSG job reference
+ * @shost: SCSI host object reference
+ * @rphy: SAS transport rphy object pointing to the expander
+ *
+ * This is used primarily by smp utils to send SMP
+ * commands to the expanders attached to the controller
+ */
+static void
+mpi3mr_transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+ struct sas_rphy *rphy)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ struct mpi3_smp_passthrough_request mpi_request;
+ struct mpi3_smp_passthrough_reply mpi_reply;
+ int rc;
+ void *psge;
+ dma_addr_t dma_addr_in;
+ dma_addr_t dma_addr_out;
+ void *addr_in = NULL;
+ void *addr_out = NULL;
+ size_t dma_len_in;
+ size_t dma_len_out;
+ unsigned int reslen = 0;
+ u16 request_sz = sizeof(struct mpi3_smp_passthrough_request);
+ u16 reply_sz = sizeof(struct mpi3_smp_passthrough_reply);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+ u16 ioc_status;
+
+ if (mrioc->reset_in_progress) {
+ ioc_err(mrioc, "%s: host reset in progress!\n", __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ &dma_addr_out, &dma_len_out, &addr_out);
+ if (rc)
+ goto out;
+
+ if (addr_out)
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, addr_out,
+ job->request_payload.payload_len);
+
+ rc = mpi3mr_map_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ &dma_addr_in, &dma_len_in, &addr_in);
+ if (rc)
+ goto unmap_out;
+
+ memset(&mpi_request, 0, request_sz);
+ memset(&mpi_reply, 0, reply_sz);
+ mpi_request.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_TRANSPORT_CMDS);
+ mpi_request.function = MPI3_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request.io_unit_port = (u8) mpi3mr_get_port_id_by_rphy(mrioc, rphy);
+ mpi_request.sas_address = ((rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(mrioc->sas_hba.sas_address));
+ psge = &mpi_request.request_sge;
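+ /* transfer lengths exclude the trailing 4-byte SMP frame CRC */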
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_out - 4, dma_addr_out);
+
+ psge = &mpi_request.response_sge;
+ mpi3mr_add_sg_single(psge, sgl_flags, dma_len_in - 4, dma_addr_in);
+
+ dprint_transport_info(mrioc, "sending SMP request\n");
+
+ rc = mpi3mr_post_transport_req(mrioc, &mpi_request, request_sz,
+ &mpi_reply, reply_sz,
+ MPI3MR_INTADMCMD_TIMEOUT, &ioc_status);
+ if (rc)
+ goto unmap_in;
+
+ dprint_transport_info(mrioc,
+ "SMP request completed with ioc_status(0x%04x)\n", ioc_status);
+
+ dprint_transport_info(mrioc,
+ "SMP request - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply.response_data_length));
+
+ memcpy(job->reply, &mpi_reply, reply_sz);
+ job->reply_len = reply_sz;
+ reslen = le16_to_cpu(mpi_reply.response_data_length);
+
+ if (addr_in)
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt, addr_in,
+ job->reply_payload.payload_len);
+
+ rc = 0;
+unmap_in:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->reply_payload,
+ dma_addr_in, addr_in);
+unmap_out:
+ mpi3mr_unmap_smp_buffer(&mrioc->pdev->dev, &job->request_payload,
+ dma_addr_out, addr_out);
+out:
+ bsg_job_done(job, rc, reslen);
+}
+
+struct sas_function_template mpi3mr_transport_functions = {
+ .get_linkerrors = mpi3mr_transport_get_linkerrors,
+ .get_enclosure_identifier = mpi3mr_transport_get_enclosure_identifier,
+ .get_bay_identifier = mpi3mr_transport_get_bay_identifier,
+ .phy_reset = mpi3mr_transport_phy_reset,
+ .phy_enable = mpi3mr_transport_phy_enable,
+ .set_phy_speed = mpi3mr_transport_phy_speed,
+ .smp_handler = mpi3mr_transport_smp_handler,
+};
+
+struct scsi_transport_template *mpi3mr_transport_template;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index d00431f553e1..4d0be5ab98c1 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -534,6 +534,7 @@ typedef struct _MPI2_CONFIG_REPLY {
****************************************************************************/
#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+#define MPI2_MFGPAGE_VENDORID_ATTO (0x117C)
/*MPI v2.0 SAS products */
#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index e83c7c529dc9..2c57115172cf 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -537,7 +537,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
U16 Event; /*0x14 */
U16 Reserved4; /*0x16 */
U32 EventContext; /*0x18 */
- U32 EventData[1]; /*0x1C */
+ U32 EventData[]; /*0x1C */
} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY,
Mpi2EventNotificationReply_t,
*pMpi2EventNotificationReply_t;
@@ -639,7 +639,7 @@ typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
U8 Reserved1; /*0x01 */
U16 Reserved2; /*0x02 */
U32 Reserved3; /*0x04 */
- U32 HostData[1]; /*0x08 */
+ U32 HostData[]; /*0x08 */
} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
@@ -1397,7 +1397,7 @@ typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
U32 Reserved8; /*0x18 */
U32 Reserved9; /*0x1C */
U32 Reserved10; /*0x20 */
- U32 HostData[1]; /*0x24 */
+ U32 HostData[]; /*0x24 */
} MPI2_SEND_HOST_MESSAGE_REQUEST,
*PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
Mpi2SendHostMessageRequest_t,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 81dab9b82f79..4e981ccaac41 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -873,7 +873,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
* @fault_code: fault code
*/
void
-mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
@@ -1057,7 +1057,7 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
desc = "config no defaults";
break;
case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
- desc = "config cant commit";
+ desc = "config can't commit";
break;
/****************************************************************************
@@ -1321,7 +1321,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
* @log_info: log info
*/
static void
-_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
union loginfo_type {
u32 loginfo;
@@ -1393,7 +1393,7 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
(ioc->logging_level & MPT_DEBUG_REPLY)) {
- _base_sas_ioc_info(ioc , mpi_reply,
+ _base_sas_ioc_info(ioc, mpi_reply,
mpt3sas_base_get_msg_frame(ioc, smid));
}
@@ -2011,9 +2011,10 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
enable_irq(reply_q->os_irq);
}
}
+
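+ /* poll inside the loop so every reply queue gets drained */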
+ if (poll)
+ _base_process_reply_queue(reply_q);
}
- if (poll)
- _base_process_reply_queue(reply_q);
}
/**
@@ -2593,12 +2594,8 @@ _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
/* Get the SG list pointer and info. */
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return 1;
- }
/* Check if we need to build a native SG list. */
if (!base_is_prp_possible(ioc, pcie_device,
@@ -2705,12 +2702,8 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return -ENOMEM;
- }
sg_local = &mpi_request->SGL;
sges_in_segment = ioc->max_sges_in_main_message;
@@ -2853,12 +2846,8 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sg_scmd = scsi_sglist(scmd);
sges_left = scsi_dma_map(scmd);
- if (sges_left < 0) {
- sdev_printk(KERN_ERR, scmd->device,
- "scsi_dma_map failed: request for %d bytes!\n",
- scsi_bufflen(scmd));
+ if (sges_left < 0)
return -ENOMEM;
- }
sg_local = &mpi_request->SGL;
sges_in_segment = (ioc->request_sz -
@@ -3001,19 +2990,26 @@ static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
+ u64 coherent_dma_mask, dma_mask;
- if (ioc->is_mcpu_endpoint ||
- sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma ||
- dma_get_required_mask(&pdev->dev) <= 32)
+ if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
+ dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) {
ioc->dma_mask = 32;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
- else if (ioc->hba_mpi_version_belonged > MPI2_VERSION)
+ } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
ioc->dma_mask = 63;
- else
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(63);
+ } else {
ioc->dma_mask = 64;
+ coherent_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ }
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) ||
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)))
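+ /* use_32bit_dma constrains only the coherent mask; streaming DMA keeps the wider mask */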
+ if (ioc->use_32bit_dma)
+ coherent_dma_mask = DMA_BIT_MASK(32);
+
+ if (dma_set_mask(&pdev->dev, dma_mask) ||
+ dma_set_coherent_mask(&pdev->dev, coherent_dma_mask))
return -ENODEV;
if (ioc->dma_mask > 32) {
@@ -3086,6 +3082,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
+ unsigned int irq;
struct adapter_reply_queue *reply_q, *next;
if (list_empty(&ioc->reply_queue_list))
@@ -3098,9 +3095,10 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
continue;
}
- if (ioc->smp_affinity_enable)
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- reply_q->msix_index), NULL);
+ if (ioc->smp_affinity_enable) {
+ irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
+ irq_update_affinity_hint(irq, NULL);
+ }
free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
reply_q);
kfree(reply_q);
@@ -3167,18 +3165,15 @@ out:
* @ioc: per adapter object
*
* The enduser would need to set the affinity via /proc/irq/#/smp_affinity
- *
- * It would nice if we could call irq_set_affinity, however it is not
- * an exported symbol
*/
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
- unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
struct adapter_reply_queue *reply_q;
- int local_numa_node;
int iopoll_q_count = ioc->reply_queue_count -
ioc->iopoll_q_start_index;
+ const struct cpumask *mask;
if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -3201,11 +3196,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
* corresponding to high iops queues.
*/
if (ioc->high_iops_queues) {
- local_numa_node = dev_to_node(&ioc->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
for (index = 0; index < ioc->high_iops_queues;
index++) {
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- index), cpumask_of_node(local_numa_node));
+ irq = pci_irq_vector(ioc->pdev, index);
+ irq_set_affinity_and_hint(irq, mask);
}
}
@@ -3704,10 +3699,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
for (i = 0; i < ioc->combined_reply_index_count; i++) {
- ioc->replyPostRegisterIndex[i] = (resource_size_t *)
- ((u8 __force *)&ioc->chip->Doorbell +
- MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
- (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
+ ioc->replyPostRegisterIndex[i] =
+ (resource_size_t __iomem *)
+ ((u8 __force *)&ioc->chip->Doorbell +
+ MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
+ (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
}
}
@@ -4764,7 +4760,7 @@ static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
{
int i = 0;
- char desc[16];
+ char desc[17] = {0};
u32 iounit_pg1_flags;
u32 bios_version;
@@ -5380,6 +5376,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
Mpi2ConfigReply_t mpi_reply;
Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+ u16 depth;
int sz;
int rc = 0;
@@ -5391,7 +5388,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
goto out;
/* sas iounit page 1 */
sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
- sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
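+ /* allocate the full page so fixed fields stay in bounds; only sz bytes are read */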
+ sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
if (!sas_iounit_pg1) {
pr_err("%s: failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -5404,16 +5401,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
ioc->name, __FILE__, __LINE__, __func__);
goto out;
}
- ioc->max_wideport_qd =
- (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
- le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
- MPT3SAS_SAS_QUEUE_DEPTH;
- ioc->max_narrowport_qd =
- (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
- le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
- MPT3SAS_SAS_QUEUE_DEPTH;
- ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
- sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+
+ depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
+ ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+ depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
+ ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+ depth = sas_iounit_pg1->SATAMaxQDepth;
+ ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
+
/* pcie iounit page 1 */
rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
&pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
@@ -5435,6 +5432,151 @@ out:
}
/**
+ * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
+ *
+ * @ioc: per adapter object
+ * @n: pointer to the ATTO NVRAM structure
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
+ struct ATTO_SAS_NVRAM *n)
+{
+ int r = -EINVAL;
+ union ATTO_SAS_ADDRESS *s1;
+ u32 len;
+ u8 *pb;
+ u8 ckSum;
+
+ /* validate nvram checksum */
+ pb = (u8 *) n;
+ ckSum = ATTO_SASNVR_CKSUM_SEED;
+ len = sizeof(struct ATTO_SAS_NVRAM);
+
+ while (len--)
+ ckSum = ckSum + pb[len];
+
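+ /* a valid image, including the 0x5A seed, sums to zero */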
+ if (ckSum) {
+ ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
+ return r;
+ }
+
+ s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;
+
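+ /* expect the "ESAS" signature and an NAA 5 address with the ATTO OUI (00:10:86) */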
+ if (n->Signature[0] != 'E'
+ || n->Signature[1] != 'S'
+ || n->Signature[2] != 'A'
+ || n->Signature[3] != 'S')
+ ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
+ else if (n->Version > ATTO_SASNVR_VERSION)
+ ioc_info(ioc, "Invalid ATTO NVRAM version");
+ else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
+ || s1->b[0] != 0x50
+ || s1->b[1] != 0x01
+ || s1->b[2] != 0x08
+ || (s1->b[3] & 0xF0) != 0x60
+ || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
+ ioc_err(ioc, "Invalid ATTO SAS address\n");
+ } else
+ r = 0;
+ return r;
+}
+
+/**
+ * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
+ *
+ * @ioc: per adapter object
+ * @sas_addr: returned SAS address
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
+{
+ Mpi2ManufacturingPage1_t mfg_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ struct ATTO_SAS_NVRAM *nvram;
+ int r;
+ __be64 addr;
+
+ r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
+ if (r) {
+ ioc_err(ioc, "Failed to read manufacturing page 1\n");
+ return r;
+ }
+
+ /* validate nvram */
+ nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
+ r = mpt3sas_atto_validate_nvram(ioc, nvram);
+ if (r)
+ return r;
+
+ addr = *((__be64 *) nvram->SasAddr);
+ sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
+ return r;
+}
+
+/**
+ * mpt3sas_atto_init - perform initialization for ATTO branded
+ * adapter.
+ * @ioc: per adapter object
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static int
+mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
+{
+ int sz = 0;
+ Mpi2BiosPage4_t *bios_pg4 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ int ix;
+ union ATTO_SAS_ADDRESS sas_addr;
+ union ATTO_SAS_ADDRESS temp;
+ union ATTO_SAS_ADDRESS bias;
+
+ r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
+ if (r)
+ return r;
+
+ /* get header first to get size */
+ r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
+ if (r) {
+ ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
+ return r;
+ }
+
+ sz = mpi_reply.Header.PageLength * sizeof(u32);
+ bios_pg4 = kzalloc(sz, GFP_KERNEL);
+ if (!bios_pg4) {
+ ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
+ return -ENOMEM;
+ }
+
+ /* read bios page 4 */
+ r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
+ if (r) {
+ ioc_err(ioc, "Failed to read ATTO bios page 4\n");
+ goto out;
+ }
+
+ /* Update bios page 4 with the ATTO WWID */
+ bias.q = sas_addr.q;
+ bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS;
+
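+ /* per-phy WWIDs increment the base address; device names use the biased address */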
+ for (ix = 0; ix < bios_pg4->NumPhys; ix++) {
+ temp.q = sas_addr.q;
+ temp.b[7] += ix;
+ bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
+ bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
+ }
+ r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
+
+out:
+ kfree(bios_pg4);
+ return r;
+}
+
+/**
* _base_static_config_pages - static start of day config pages
* @ioc: per adapter object
*/
@@ -5457,6 +5599,13 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
if (rc)
return rc;
}
+
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ rc = mpt3sas_atto_init(ioc);
+ if (rc)
+ return rc;
+ }
+
/*
* Ensure correct T10 PI operation if vendor left EEDPTagMode
* flag unset in NVDATA.
@@ -5506,12 +5655,21 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
rc = _base_assign_fw_reported_qd(ioc);
if (rc)
return rc;
- rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
- if (rc)
- return rc;
- rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
- if (rc)
- return rc;
+
+ /*
+ * ATTO doesn't use bios pages 2 and 3 for bios settings.
+ */
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO)
+ ioc->bios_pg3.BiosVersion = 0;
+ else {
+ rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ if (rc)
+ return rc;
+ rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ if (rc)
+ return rc;
+ }
+
rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
if (rc)
return rc;
@@ -5728,22 +5886,20 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_check_same_4gb_region - checks whether all reply queues in a set
* have the same upper 32 bits in their base memory address.
- * @reply_pool_start_address: Base address of a reply queue set
+ * @start_address: Base address of a reply queue set
* @pool_sz: Size of a single Reply Descriptor Post Queue pool
*
* Return: 1 if reply queues in a set have the same upper 32 bits in their base
* memory address, else 0.
*/
-
static int
-mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz)
+mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
- long reply_pool_end_address;
+ dma_addr_t end_address;
- reply_pool_end_address = reply_pool_start_address + pool_sz;
+ end_address = start_address + pool_sz - 1;
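+ /* -1: compare against the last byte, so a pool ending exactly on a 4GB boundary passes */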
- if (upper_32_bits(reply_pool_start_address) ==
- upper_32_bits(reply_pool_end_address))
+ if (upper_32_bits(start_address) == upper_32_bits(end_address))
return 1;
else
return 0;
@@ -5804,7 +5960,7 @@ _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
}
if (!mpt3sas_check_same_4gb_region(
- (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) {
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
ioc->pcie_sg_lookup[i].pcie_sgl,
(unsigned long long)
@@ -5859,8 +6015,8 @@ _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
GFP_KERNEL, &ctr->chain_buffer_dma);
if (!ctr->chain_buffer)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)
- ctr->chain_buffer, ioc->chain_segment_sz)) {
+ if (!mpt3sas_check_same_4gb_region(
+ ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
ioc_err(ioc,
"Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
ctr->chain_buffer,
@@ -5896,7 +6052,7 @@ _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
GFP_KERNEL, &ioc->sense_dma);
if (!ioc->sense)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) {
+ if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
dinitprintk(ioc, pr_err(
"Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
ioc->sense, (unsigned long long) ioc->sense_dma));
@@ -5929,7 +6085,7 @@ _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
&ioc->reply_dma);
if (!ioc->reply)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
dinitprintk(ioc, pr_err(
"Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
ioc->reply, (unsigned long long) ioc->reply_dma));
@@ -5964,7 +6120,7 @@ _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
GFP_KERNEL, &ioc->reply_free_dma);
if (!ioc->reply_free)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) {
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
dinitprintk(ioc,
pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
@@ -6003,7 +6159,7 @@ _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
GFP_KERNEL, &ioc->reply_post_free_array_dma);
if (!ioc->reply_post_free_array)
return -EAGAIN;
- if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array,
+ if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
reply_post_free_array_sz)) {
dinitprintk(ioc, pr_err(
"Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
@@ -6068,7 +6224,7 @@ base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
* resources and set DMA mask to 32 and allocate.
*/
if (!mpt3sas_check_same_4gb_region(
- (long)ioc->reply_post[i].reply_post_free, sz)) {
+ ioc->reply_post[i].reply_post_free_dma, sz)) {
dinitprintk(ioc,
ioc_err(ioc, "bad Replypost free pool(0x%p)"
"reply_post_free_dma = (0x%llx)\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index a0af986633d2..05364aa15ecd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "39.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 39
+#define MPT3SAS_DRIVER_VERSION "43.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 43
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -1588,7 +1588,7 @@ struct MPT3SAS_ADAPTER {
u8 combined_reply_index_count;
u8 smp_affinity_enable;
/* reply post register index */
- resource_size_t **replyPostRegisterIndex;
+ resource_size_t __iomem **replyPostRegisterIndex;
struct list_head delayed_tr_list;
struct list_head delayed_tr_volume_list;
@@ -1652,6 +1652,32 @@ struct mpt3sas_debugfs_buffer {
typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
+/*
+ * struct ATTO_SAS_NVRAM - ATTO NVRAM settings stored in
+ * Manufacturing page 1, used to obtain the
+ * ATTO SasAddr.
+ */
+struct ATTO_SAS_NVRAM {
+ u8 Signature[4];
+ u8 Version;
+#define ATTO_SASNVR_VERSION 0
+
+ u8 Checksum;
+#define ATTO_SASNVR_CKSUM_SEED 0x5A
+ u8 Pad[10];
+ u8 SasAddr[8];
+#define ATTO_SAS_ADDR_ALIGN 64
+ u8 Reserved[232];
+};
+
+#define ATTO_SAS_ADDR_DEVNAME_BIAS 63
+
+union ATTO_SAS_ADDRESS {
+ U8 b[8];
+ U16 w[4];
+ U32 d[2];
+ U64 q;
+};
/* base shared API */
extern struct list_head mpt3sas_ioc_list;
@@ -1828,6 +1854,9 @@ int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc,
u8 *num_phys);
int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page);
+
int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
u16 sz);
@@ -1846,6 +1875,12 @@ int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage2_t *config_page);
int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_page);
+int mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_page);
int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
*mpi_reply, Mpi2IOUnitPage0_t *config_page);
int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 0563078227de..d114ef381c44 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
retry_count++;
if (ioc->config_cmds.smid == smid)
mpt3sas_base_free_smid(ioc, smid);
- if ((ioc->shost_recovery) || (ioc->config_cmds.status &
- MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ if (ioc->config_cmds.status & MPT3_CMD_RESET)
goto retry_config;
- issue_host_reset = 1;
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ issue_host_reset = 0;
+ r = -EFAULT;
+ } else
+ issue_host_reset = 1;
goto free_mem;
}
@@ -538,6 +541,42 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * mpt3sas_config_get_manufacturing_pg1 - obtain manufacturing page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -754,11 +793,99 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
r = _config_request(ioc, &mpi_request, mpi_reply,
MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
sizeof(*config_page));
+
out:
return r;
}
/**
+ * mpt3sas_config_set_bios_pg4 - write out bios page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz_config_pg: size of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_pg)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION;
+
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz_config_pg);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg4 - read bios page 4
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz_config_pg: size of the config page
+ * Context: sleep.
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg4(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage4_t *config_page,
+ int sz_config_pg)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 4;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE4_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ /*
+ * The size of the page is variable. Allow the caller to
+ * request just the size.
+ */
+ if (config_page && sz_config_pg) {
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz_config_pg);
+ }
+
+out:
+ return r;
+}
+
+/**
* mpt3sas_config_get_iounit_pg0 - obtain iounit page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 05b6c6a073c3..0d8b1e942ded 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -578,7 +578,7 @@ static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
- u8 found = 0;
+ bool found = false;
u16 smid;
u16 handle;
struct scsi_cmnd *scmd;
@@ -600,6 +600,7 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
handle = le16_to_cpu(tm_request->DevHandle);
for (smid = ioc->scsiio_depth; smid && !found; smid--) {
struct scsiio_tracker *st;
+ __le16 task_mid;
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
@@ -618,10 +619,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
* first outstanding smid will be picked up. Otherwise,
* targeted smid will be the one.
*/
- if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) {
- tm_request->TaskMID = cpu_to_le16(st->smid);
- found = 1;
- }
+ task_mid = cpu_to_le16(st->smid);
+ if (!tm_request->TaskMID)
+ tm_request->TaskMID = task_mid;
+ found = tm_request->TaskMID == task_mid;
}
if (!found) {
@@ -947,6 +948,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
break;
}
case MPI2_FUNCTION_FW_DOWNLOAD:
+ {
+ if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
+ ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n");
+ ret = -EPERM;
+ break;
+ }
+ fallthrough;
+ }
case MPI2_FUNCTION_FW_UPLOAD:
{
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
@@ -1685,6 +1694,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
request_data = ioc->diag_buffer[buffer_type];
@@ -1786,6 +1796,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (rc && request_data) {
dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
ioc->diag_buffer_status[buffer_type] &=
~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
}
@@ -2162,6 +2173,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
@@ -2416,6 +2428,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
ioc->ctl_cmds.status = MPT3_CMD_PENDING;
memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, ioc->request_sz);
ioc->ctl_cmds.smid = smid;
mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
@@ -3533,11 +3546,31 @@ diag_trigger_master_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_MASTER_TRIGGER_T *master_tg;
unsigned long flags;
ssize_t rc;
+ bool set = true;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+
+ if (ioc->supports_trigger_pages) {
+ master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
+ GFP_KERNEL);
+ if (!master_tg)
+ return -ENOMEM;
+
+ memcpy(master_tg, buf, rc);
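+ /* an all-zero MasterData clears the trigger page rather than setting it */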
+ if (!master_tg->MasterData)
+ set = false;
+ if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
+ set)) {
+ kfree(master_tg);
+ return -EFAULT;
+ }
+ kfree(master_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
memset(&ioc->diag_trigger_master, 0,
sizeof(struct SL_WH_MASTER_TRIGGER_T));
memcpy(&ioc->diag_trigger_master, buf, rc);
@@ -3589,11 +3622,31 @@ diag_trigger_event_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg;
unsigned long flags;
ssize_t sz;
+ bool set = true;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!event_tg)
+ return -ENOMEM;
+
+ memcpy(event_tg, buf, sz);
+ if (!event_tg->ValidEntries)
+ set = false;
+ if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
+ set)) {
+ kfree(event_tg);
+ return -EFAULT;
+ }
+ kfree(event_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
memset(&ioc->diag_trigger_event, 0,
sizeof(struct SL_WH_EVENT_TRIGGERS_T));
memcpy(&ioc->diag_trigger_event, buf, sz);
@@ -3644,11 +3697,31 @@ diag_trigger_scsi_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
unsigned long flags;
ssize_t sz;
+ bool set = true;
+
+ sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!scsi_tg)
+ return -ENOMEM;
+
+ memcpy(scsi_tg, buf, sz);
+ if (!scsi_tg->ValidEntries)
+ set = false;
+ if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
+ set)) {
+ kfree(scsi_tg);
+ return -EFAULT;
+ }
+ kfree(scsi_tg);
+ }
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
- sz = min(sizeof(ioc->diag_trigger_scsi), count);
+
memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
memcpy(&ioc->diag_trigger_scsi, buf, sz);
if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
@@ -3698,11 +3771,30 @@ diag_trigger_mpi_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
unsigned long flags;
ssize_t sz;
+ bool set = true;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!mpi_tg)
+ return -ENOMEM;
+
+ memcpy(mpi_tg, buf, sz);
+ if (!mpi_tg->ValidEntries)
+ set = false;
+ if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
+ set)) {
+ kfree(mpi_tg);
+ return -EFAULT;
+ }
+ kfree(mpi_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
memset(&ioc->diag_trigger_mpi, 0,
sizeof(ioc->diag_trigger_mpi));
memcpy(&ioc->diag_trigger_mpi, buf, sz);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 00792767c620..8e24ebcebfe5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
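+ /* drop the reference held by the event list */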
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
- fw_event_work_put(fw_event);
}
ioc->fw_events_cleanup = 0;
}
@@ -5156,6 +5156,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
/* invalid device handle */
handle = sas_target_priv_data->handle;
+
+ /*
+ * Avoid error handling escalation when the device is disconnected
+ */
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
+ if (scmd->device->host->shost_state == SHOST_RECOVERY &&
+ scmd->cmnd[0] == TEST_UNIT_READY) {
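+ /* 29h/07h: I_T nexus loss occurred */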
+ scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
+ scsi_done(scmd);
+ return 0;
+ }
+ }
+
if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
scmd->result = DID_NO_CONNECT << 16;
scsi_done(scmd);
@@ -5294,7 +5307,7 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
}
/**
- * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request
+ * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
* @ioc: per adapter object
* @scmd: pointer to scsi command object
* @mpi_reply: reply mf payload returned from firmware
@@ -10926,20 +10939,20 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
case MPI2_EVENT_LOG_ENTRY_ADDED:
{
Mpi2EventDataLogEntryAdded_t *log_entry;
- u32 *log_code;
+ u32 log_code;
if (!ioc->is_warpdrive)
break;
log_entry = (Mpi2EventDataLogEntryAdded_t *)
mpi_reply->EventData;
- log_code = (u32 *)log_entry->LogData;
+ log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
if (le16_to_cpu(log_entry->LogEntryQualifier)
!= MPT2_WARPDRIVE_LOGENTRY)
break;
- switch (le32_to_cpu(*log_code)) {
+ switch (log_code) {
case MPT2_WARPDRIVE_LC_SSDT:
ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
break;
@@ -11035,6 +11048,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
{
struct _sas_port *mpt3sas_port, *next;
unsigned long flags;
+ int port_id;
/* remove sibling ports attached to this expander */
list_for_each_entry_safe(mpt3sas_port, next,
@@ -11055,6 +11069,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_port->hba_port);
}
+ port_id = sas_expander->port->port_id;
+
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
sas_expander->sas_address_parent, sas_expander->port);
@@ -11062,7 +11078,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
"expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
sas_expander->handle, (unsigned long long)
sas_expander->sas_address,
- sas_expander->port->port_id);
+ port_id);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
@@ -11383,6 +11399,7 @@ scsih_shutdown(struct pci_dev *pdev)
_scsih_ir_shutdown(ioc);
_scsih_nvme_shutdown(ioc);
mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_stop_watchdog(ioc);
ioc->shost_recovery = 1;
mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
@@ -11868,7 +11885,7 @@ out:
* scsih_map_queues - map reply queues with request queues
* @shost: SCSI host pointer
*/
-static int scsih_map_queues(struct Scsi_Host *shost)
+static void scsih_map_queues(struct Scsi_Host *shost)
{
struct MPT3SAS_ADAPTER *ioc =
(struct MPT3SAS_ADAPTER *)shost->hostdata;
@@ -11878,7 +11895,7 @@ static int scsih_map_queues(struct Scsi_Host *shost)
int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
map = &shost->tag_set.map[i];
@@ -11906,7 +11923,6 @@ static int scsih_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
}
- return 0;
}
/* shost template for SAS 2.0 HBA devices */
@@ -11971,7 +11987,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.sg_tablesize = MPT3SAS_SG_DEPTH,
.max_sectors = 32767,
.max_segment_size = 0xffffffff,
- .cmd_per_lun = 7,
+ .cmd_per_lun = 128,
.shost_groups = mpt3sas_host_groups,
.sdev_groups = mpt3sas_dev_groups,
.track_queue_depth = 1,
@@ -12406,7 +12422,6 @@ scsih_suspend(struct device *dev)
return rc;
mpt3sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
_scsih_nvme_shutdown(ioc);
ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
@@ -12585,20 +12600,18 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
*/
bool scsih_ncq_prio_supp(struct scsi_device *sdev)
{
- unsigned char *buf;
+ struct scsi_vpd *vpd;
bool ncq_prio_supp = false;
- if (!scsi_device_supports_vpd(sdev))
- return ncq_prio_supp;
-
- buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
- if (!buf)
- return ncq_prio_supp;
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (!vpd || vpd->len < 214)
+ goto out;
- if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
- ncq_prio_supp = (buf[213] >> 4) & 1;
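+ /* bit 4 of byte 213 in the ATA Information VPD page (89h) reports NCQ priority support */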
+ ncq_prio_supp = (vpd->data[213] >> 4) & 1;
+out:
+ rcu_read_unlock();
- kfree(buf);
return ncq_prio_supp;
}
/*
@@ -12732,6 +12745,12 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
/*
+ * ATTO Branded ExpressSAS H12xx GT
+ */
+ { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
+ /*
* Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
*/
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c
index 869b8b058a43..472fa043094f 100644
--- a/drivers/scsi/mvme147.c
+++ b/drivers/scsi/mvme147.c
@@ -11,8 +11,12 @@
#include <asm/mvme147hw.h>
#include <asm/irq.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
#include "mvme147.h"
@@ -29,10 +33,11 @@ static irqreturn_t mvme147_intr(int irq, void *data)
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct Scsi_Host *instance = cmd->device->host;
struct WD33C93_hostdata *hdata = shost_priv(instance);
unsigned char flags = 0x01;
- unsigned long addr = virt_to_bus(cmd->SCp.ptr);
+ unsigned long addr = virt_to_bus(scsi_pointer->ptr);
/* setup dma direction */
if (!dir_in)
@@ -43,14 +48,14 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
if (dir_in) {
/* invalidate any cache */
- cache_clear(addr, cmd->SCp.this_residual);
+ cache_clear(addr, scsi_pointer->this_residual);
} else {
/* push any dirty cache */
- cache_push(addr, cmd->SCp.this_residual);
+ cache_push(addr, scsi_pointer->this_residual);
}
/* start DMA */
- m147_pcc->dma_bcr = cmd->SCp.this_residual | (1 << 24);
+ m147_pcc->dma_bcr = scsi_pointer->this_residual | (1 << 24);
m147_pcc->dma_dadr = addr;
m147_pcc->dma_cntrl = flags;
@@ -77,6 +82,7 @@ static struct scsi_host_template mvme147_host_template = {
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static struct Scsi_Host *mvme147_shost;
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index 199ab49aa047..7123a2efbf58 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -486,9 +486,4 @@ enum datapres_field {
SENSE_DATA = 2,
};
-/* define task management IU */
-struct mvs_tmf_task{
- u8 tmf;
- u16 tag_of_task_to_be_managed;
-};
#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index dcae2d4464f9..2fde496fff5f 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -63,9 +63,8 @@ static struct sas_domain_function_template mvs_transport_ops = {
.lldd_control_phy = mvs_phy_control,
.lldd_abort_task = mvs_abort_task,
- .lldd_abort_task_set = mvs_abort_task_set,
- .lldd_clear_aca = mvs_clear_aca,
- .lldd_clear_task_set = mvs_clear_task_set,
+ .lldd_abort_task_set = sas_abort_task_set,
+ .lldd_clear_task_set = sas_clear_task_set,
.lldd_I_T_nexus_reset = mvs_I_T_nexus_reset,
.lldd_lu_reset = mvs_lu_reset,
.lldd_query_task = mvs_query_task,
@@ -494,7 +493,6 @@ static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int rc, nhost = 0;
struct mvs_info *mvi;
- struct mvs_prv_info *mpi;
irq_handler_t irq_handler = mvs_interrupt;
struct Scsi_Host *shost = NULL;
const struct mvs_chip_info *chip;
@@ -559,10 +557,13 @@ static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
}
nhost++;
} while (nhost < chip->n_host);
- mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ {
+ struct mvs_prv_info *mpi = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
(unsigned long)SHOST_TO_SAS_HA(shost));
+ }
#endif
mvs_post_sas_ha_init(shost, chip);
@@ -646,6 +647,7 @@ static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+ { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
{ PCI_VDEVICE(TTI, 0x2710), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2720), chip_9480 },
{ PCI_VDEVICE(TTI, 0x2721), chip_9480 },
@@ -696,7 +698,7 @@ static struct pci_driver mvs_pci_driver = {
static ssize_t driver_version_show(struct device *cdev,
struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
+ return sysfs_emit(buffer, "%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(driver_version);
@@ -744,7 +746,7 @@ static ssize_t interrupt_coalescing_store(struct device *cdev,
static ssize_t interrupt_coalescing_show(struct device *cdev,
struct device_attribute *attr, char *buffer)
{
- return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
+ return sysfs_emit(buffer, "%d\n", interrupt_coalescing);
}
static DEVICE_ATTR_RW(interrupt_coalescing);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 31d1ea5a5dd2..a6867dae0e7c 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -67,8 +67,10 @@ static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
+ spin_lock(&sha->sas_port[i]->phy_list_lock);
phy = container_of(sha->sas_port[i]->phy_list.next,
struct asd_sas_phy, port_phy_el);
+ spin_unlock(&sha->sas_port[i]->phy_list_lock);
j = 0;
while (sha->sas_phy[j]) {
if (sha->sas_phy[j] == phy)
@@ -96,6 +98,8 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
struct asd_sas_phy *phy;
+
+ spin_lock(&sha->sas_port[i]->phy_list_lock);
list_for_each_entry(phy,
&sha->sas_port[i]->phy_list, port_phy_el) {
j = 0;
@@ -109,6 +113,7 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
num++;
n++;
}
+ spin_unlock(&sha->sas_port[i]->phy_list_lock);
break;
}
i++;
@@ -551,7 +556,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
static int mvs_task_prep_ssp(struct mvs_info *mvi,
struct mvs_task_exec_info *tei, int is_tmf,
- struct mvs_tmf_task *tmf)
+ struct sas_tmf_task *tmf)
{
struct sas_task *task = tei->task;
struct mvs_cmd_hdr *hdr = tei->hdr;
@@ -691,7 +696,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
- struct mvs_tmf_task *tmf, int *pass)
+ struct sas_tmf_task *tmf, int *pass)
{
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
@@ -810,9 +815,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
slot->port = tei.port;
task->lldd_task = slot;
list_add_tail(&slot->entry, &tei.port->list);
- spin_lock(&task->task_state_lock);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&task->task_state_lock);
mvi_dev->running_req++;
++(*pass);
@@ -835,14 +837,14 @@ prep_out:
return rc;
}
-static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
- struct completion *completion, int is_tmf,
- struct mvs_tmf_task *tmf)
+int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
struct mvs_info *mvi = NULL;
u32 rc = 0;
u32 pass = 0;
unsigned long flags = 0;
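+ /* the TMF context, if any, now arrives attached to the task */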
+ struct sas_tmf_task *tmf = task->tmf;
+ int is_tmf = !!task->tmf;
mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
@@ -859,11 +861,6 @@ static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
return rc;
}
-int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
-}
-
static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
@@ -1257,112 +1254,6 @@ void mvs_dev_gone(struct domain_device *dev)
mvs_dev_gone_notify(dev);
}
-static void mvs_task_done(struct sas_task *task)
-{
- if (!del_timer(&task->slow_task->timer))
- return;
- complete(&task->slow_task->completion);
-}
-
-static void mvs_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
-
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
-}
-
-#define MVS_TASK_TIMEOUT 20
-static int mvs_exec_internal_tmf_task(struct domain_device *dev,
- void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
-{
- int res, retry;
- struct sas_task *task = NULL;
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
-
- memcpy(&task->ssp_task, parameter, para_len);
- task->task_done = mvs_task_done;
-
- task->slow_task->timer.function = mvs_tmf_timedout;
- task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
- add_timer(&task->slow_task->timer);
-
- res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- mv_printk("executing internal task failed:%d\n", res);
- goto ex_err;
- }
-
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
- goto ex_err;
- }
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun */
- res = task->task_status.residual;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- mv_dprintk("blocked task error.\n");
- res = -EMSGSIZE;
- break;
- } else {
- mv_dprintk(" task to dev %016llx response: 0x%x "
- "status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- sas_free_task(task);
- task = NULL;
-
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
-
-static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
- u8 *lun, struct mvs_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
- if (!(dev->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- memcpy(ssp_task.LUN, lun, 8);
-
- return mvs_exec_internal_tmf_task(dev, &ssp_task,
- sizeof(ssp_task), tmf);
-}
-
-
/* Standard mandates link reset for ATA (type 0)
and hard reset for SSP (type 1), only for RECOVERY */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
@@ -1382,13 +1273,11 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
unsigned long flags;
int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
struct mvs_device * mvi_dev = dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- tmf_task.tmf = TMF_LU_RESET;
mvi_dev->dev_status = MVS_DEV_EH;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+ rc = sas_lu_reset(dev, lun);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&mvi->lock, flags);
mvs_release_task(mvi, dev);
@@ -1425,27 +1314,20 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
int mvs_query_task(struct sas_task *task)
{
u32 tag;
- struct scsi_lun lun;
- struct mvs_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
- int_to_scsilun(cmnd->device->lun, &lun);
rc = mvs_find_tag(mvi, task, &tag);
if (rc == 0) {
rc = TMF_RESP_FUNC_FAILED;
return rc;
}
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
@@ -1462,8 +1344,6 @@ int mvs_query_task(struct sas_task *task)
/* mandatory SAM-3, still need free task/slot info */
int mvs_abort_task(struct sas_task *task)
{
- struct scsi_lun lun;
- struct mvs_tmf_task tmf_task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
struct mvs_info *mvi;
@@ -1487,9 +1367,6 @@ int mvs_abort_task(struct sas_task *task)
spin_unlock_irqrestore(&task->task_state_lock, flags);
mvi_dev->dev_status = MVS_DEV_EH;
if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
-
- int_to_scsilun(cmnd->device->lun, &lun);
rc = mvs_find_tag(mvi, task, &tag);
if (rc == 0) {
mv_printk("No such tag in %s\n", __func__);
@@ -1497,10 +1374,7 @@ int mvs_abort_task(struct sas_task *task)
return rc;
}
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
-
- rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ rc = sas_abort_task(task, tag);
/* if successful, clear the task and callback forwards. */
if (rc == TMF_RESP_FUNC_COMPLETE) {
@@ -1537,39 +1411,6 @@ out:
return rc;
}
-int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
-{
- int rc;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
-int mvs_clear_aca(struct domain_device *dev, u8 *lun)
-{
- int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
-int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
-{
- int rc = TMF_RESP_FUNC_FAILED;
- struct mvs_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
- rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
-
- return rc;
-}
-
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
u32 slot_idx, int err)
{
@@ -1636,7 +1477,7 @@ static void mvs_set_sense(u8 *buffer, int len, int d_sense,
static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
u8 key, u8 asc, u8 asc_q)
{
- iu->datapres = 2;
+ iu->datapres = SAS_DATAPRES_SENSE_DATA;
iu->response_data_len = 0;
iu->sense_data_len = 17;
iu->status = 02;
@@ -1716,8 +1557,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
mvi_dev = dev->lldd_dev;
spin_lock(&task->task_state_lock);
- task->task_state_flags &=
- ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
task->task_state_flags |= SAS_TASK_STATE_DONE;
/* race condition */
aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
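The mvsas hunks above replace the driver's private SSP TMF plumbing with the generic libsas helpers. A minimal sketch of how an LLDD's error-handling callbacks reduce to those helpers; the demo_* names are hypothetical, while sas_lu_reset(), sas_query_task() and sas_abort_task() are the libsas entry points the patch calls:

#include <scsi/libsas.h>

/*
 * Sketch of LLDD error-handler callbacks after this conversion: libsas
 * builds and issues the SSP TMF IU internally, so no per-driver tmf
 * structure remains.
 */
static int demo_lu_reset(struct domain_device *dev, u8 *lun)
{
	return sas_lu_reset(dev, lun);		/* was TMF_LU_RESET plumbing */
}

static int demo_query_task(struct sas_task *task, u16 tag)
{
	return sas_query_task(task, tag);	/* was TMF_QUERY_TASK */
}

static int demo_abort_task(struct sas_task *task, u16 tag)
{
	return sas_abort_task(task, tag);	/* was TMF_ABORT_TASK */
}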
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 8ff976c9967e..509d8f32a04f 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -440,9 +440,6 @@ void mvs_scan_start(struct Scsi_Host *shost);
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags);
int mvs_abort_task(struct sas_task *task);
-int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
-int mvs_clear_aca(struct domain_device *dev, u8 *lun);
-int mvs_clear_task_set(struct domain_device *dev, u8 * lun);
void mvs_port_formed(struct asd_sas_phy *sas_phy);
void mvs_port_deformed(struct asd_sas_phy *sas_phy);
int mvs_dev_found(struct domain_device *dev);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 904de62c974c..05d3ce9b72db 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1302,7 +1302,7 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
{
struct scsi_cmnd *scmd = cmd->scmd;
- cmd->scmd->SCp.ptr = NULL;
+ mvumi_priv(cmd->scmd)->cmd_priv = NULL;
scmd->result = ob_frame->req_status;
switch (ob_frame->req_status) {
@@ -2097,7 +2097,7 @@ static int mvumi_queue_command(struct Scsi_Host *shost,
goto out_return_cmd;
cmd->scmd = scmd;
- scmd->SCp.ptr = (char *) cmd;
+ mvumi_priv(scmd)->cmd_priv = cmd;
mhba->instancet->fire_cmd(mhba, cmd);
spin_unlock_irqrestore(shost->host_lock, irq_flags);
return 0;
@@ -2111,7 +2111,7 @@ out_return_cmd:
static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
{
- struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
+ struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv;
struct Scsi_Host *host = scmd->device->host;
struct mvumi_hba *mhba = shost_priv(host);
unsigned long flags;
@@ -2128,7 +2128,7 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
atomic_dec(&mhba->fw_outstanding);
scmd->result = (DID_ABORT << 16);
- scmd->SCp.ptr = NULL;
+ mvumi_priv(scmd)->cmd_priv = NULL;
if (scsi_bufflen(scmd)) {
dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
@@ -2179,6 +2179,7 @@ static struct scsi_host_template mvumi_template = {
.bios_param = mvumi_bios_param,
.dma_boundary = PAGE_SIZE - 1,
.this_id = -1,
+ .cmd_size = sizeof(struct mvumi_cmd_priv),
};
static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index 60d5691fc4ab..a88c58787b68 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -254,6 +254,15 @@ struct mvumi_cmd {
unsigned char cmd_status;
};
+struct mvumi_cmd_priv {
+ struct mvumi_cmd *cmd_priv;
+};
+
+static inline struct mvumi_cmd_priv *mvumi_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
/*
* the function type of the in bound frame
*/
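The struct and accessor above are the midlayer's per-command private data pattern that recurs throughout this series: .cmd_size in the host template tells the midlayer how many extra bytes to reserve behind every struct scsi_cmnd, and scsi_cmd_priv() returns that area, replacing the old scmd->SCp scratch fields. A minimal sketch of the pattern (driver names hypothetical):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct demo_cmd_priv {
	void *cmd;			/* replaces scmd->SCp.ptr */
};

static struct scsi_host_template demo_template = {
	.name		= "demo",
	/* the midlayer allocates this much extra storage per command */
	.cmd_size	= sizeof(struct demo_cmd_priv),
};

static int demo_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *scmd)
{
	struct demo_cmd_priv *priv = scsi_cmd_priv(scmd);

	priv->cmd = scmd;		/* stash driver state for completion */
	return 0;
}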
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 2a4506a5083e..e885c1dbf61f 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1239,7 +1239,8 @@ static void myrb_cleanup(struct myrb_hba *cb)
myrb_unmap(cb);
if (cb->mmio_base) {
- cb->disable_intr(cb->io_base);
+ if (cb->disable_intr)
+ cb->disable_intr(cb->io_base);
iounmap(cb->mmio_base);
}
if (cb->irq)
@@ -1674,7 +1675,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
if (sdev->id > MYRB_MAX_TARGETS)
return -ENXIO;
- pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
@@ -3413,9 +3414,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
mutex_init(&cb->dcmd_mutex);
mutex_init(&cb->dma_mutex);
cb->pdev = pdev;
+ cb->host = shost;
- if (pci_enable_device(pdev))
- goto failure;
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ scsi_host_put(shost);
+ return NULL;
+ }
if (privdata->hw_init == DAC960_PD_hw_init ||
privdata->hw_init == DAC960_P_hw_init) {
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 6ea323e9a2e3..7eb8c39da366 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -538,13 +538,11 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = NULL;
goto out_free;
}
- cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
- GFP_KERNEL | GFP_DMA);
+ cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL);
if (!cs->ctlr_info)
goto out_free;
- cs->event_buf = kzalloc(sizeof(struct myrs_event),
- GFP_KERNEL | GFP_DMA);
+ cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL);
if (!cs->event_buf)
goto out_free;
@@ -1805,7 +1803,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
ldev_num = myrs_translate_ldev(cs, sdev);
- ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+ ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
if (!ldev_info)
return -ENOMEM;
@@ -1867,7 +1865,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
} else {
struct myrs_pdev_info *pdev_info;
- pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
@@ -2269,7 +2267,8 @@ static void myrs_cleanup(struct myrs_hba *cs)
myrs_unmap(cs);
if (cs->mmio_base) {
- cs->disable_intr(cs);
+ if (cs->disable_intr)
+ cs->disable_intr(cs);
iounmap(cs->mmio_base);
cs->mmio_base = NULL;
}
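The GFP_DMA removals in myrb and myrs reflect how these buffers are actually used: they are either mapped through the streaming DMA API, which bounces as needed once the device's DMA mask has been declared, or not used for DMA at all, so plain GFP_KERNEL allocations are correct and the deprecated ZONE_DMA restriction buys nothing. A sketch of the usual pairing (function and names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>

static int demo_alloc_and_map(struct pci_dev *pdev, size_t len)
{
	void *buf;
	dma_addr_t dma;

	/* declare once, at probe time, what the controller can address */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	/* plain kernel memory, no GFP_DMA ... */
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... the DMA API maps it, bouncing if the mask requires it */
	dma = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}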
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index fc8abe05fa8f..4458449c960b 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -514,30 +514,29 @@ static m_addr_t __vtobus(m_bush_t bush, void *m)
* Deal with DMA mapping/unmapping.
*/
-/* To keep track of the dma mapping (sg/single) that has been set */
-#define __data_mapped SCp.phase
-#define __data_mapping SCp.have_data_in
-
static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
{
- switch(cmd->__data_mapped) {
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
+
+ switch(cmd_priv->data_mapped) {
case 2:
scsi_dma_unmap(cmd);
break;
}
- cmd->__data_mapped = 0;
+ cmd_priv->data_mapped = 0;
}
static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
{
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
int use_sg;
use_sg = scsi_dma_map(cmd);
if (!use_sg)
return 0;
- cmd->__data_mapped = 2;
- cmd->__data_mapping = use_sg;
+ cmd_priv->data_mapped = 2;
+ cmd_priv->data_mapping = use_sg;
return use_sg;
}
@@ -7854,6 +7853,7 @@ static int ncr53c8xx_slave_configure(struct scsi_device *device)
static int ncr53c8xx_queue_command_lck(struct scsi_cmnd *cmd)
{
+ struct ncr_cmd_priv *cmd_priv = scsi_cmd_priv(cmd);
void (*done)(struct scsi_cmnd *) = scsi_done;
struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb;
unsigned long flags;
@@ -7864,8 +7864,8 @@ printk("ncr53c8xx_queue_command\n");
#endif
cmd->host_scribble = NULL;
- cmd->__data_mapped = 0;
- cmd->__data_mapping = 0;
+ cmd_priv->data_mapped = 0;
+ cmd_priv->data_mapping = 0;
spin_lock_irqsave(&np->smp_lock, flags);
@@ -8085,6 +8085,8 @@ struct Scsi_Host * __init ncr_attach(struct scsi_host_template *tpnt,
u_long flags = 0;
int i;
+ WARN_ON_ONCE(tpnt->cmd_size < sizeof(struct ncr_cmd_priv));
+
if (!tpnt->name)
tpnt->name = SCSI_NCR_DRIVER_NAME;
if (!tpnt->shost_groups)
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index fa14b5ca8783..be38c902859e 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -1288,6 +1288,12 @@ struct ncr_device {
u8 differential;
};
+/* To keep track of the dma mapping (sg/single) that has been set */
+struct ncr_cmd_priv {
+ int data_mapped;
+ int data_mapping;
+};
+
extern struct Scsi_Host *ncr_attach(struct scsi_host_template *tpnt, int unit, struct ncr_device *device);
extern void ncr53c8xx_release(struct Scsi_Host *host);
irqreturn_t ncr53c8xx_intr(int irq, void *dev_id);
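Since the ncr53c8xx core is shared by several front-end drivers that each supply their own host template, ncr_attach() now asserts up front that the template reserved room for struct ncr_cmd_priv. A front end satisfies the check like this (front-end names hypothetical):

#include <scsi/scsi_host.h>
#include "ncr53c8xx.h"

static struct scsi_host_template demo_frontend_template = {
	.name		= "demo53c8xx",
	/* must be at least sizeof(struct ncr_cmd_priv), or the
	 * WARN_ON_ONCE() in ncr_attach() fires at probe time */
	.cmd_size	= sizeof(struct ncr_cmd_priv),
};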
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index bd3ee3bf08ee..75bb0028ed74 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -273,6 +273,7 @@ static struct scsi_host_template nsp32_template = {
.eh_abort_handler = nsp32_eh_abort,
.eh_host_reset_handler = nsp32_eh_host_reset,
/* .highmem_io = 1, */
+ .cmd_size = sizeof(struct nsp32_cmd_priv),
};
#include "nsp32_io.h"
@@ -946,14 +947,9 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt)
show_command(SCpnt);
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
+ nsp32_priv(SCpnt)->status = SAM_STAT_CHECK_CONDITION;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
- SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
- SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
- SCpnt->SCp.buffer = NULL;
- SCpnt->SCp.buffers_residual = 0;
-
/* initialize data */
data->msgout_len = 0;
data->msgin_len = 0;
@@ -1376,7 +1372,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
case BUSPHASE_STATUS:
nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
break;
default:
@@ -1687,18 +1683,18 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
/* MsgIn 00: Command Complete */
nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
- SCpnt->SCp.Status, scsi_get_resid(SCpnt));
+ nsp32_priv(SCpnt)->status, scsi_get_resid(SCpnt));
SCpnt->result = (DID_OK << 16) |
- (SCpnt->SCp.Status << 0);
+ (nsp32_priv(SCpnt)->status << 0);
nsp32_scsi_done(SCpnt);
/* All operation is done */
return TRUE;
} else if (execph & MSGIN_04_VALID) {
/* MsgIn 04: Disconnect */
- SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
+ nsp32_priv(SCpnt)->status = nsp32_read1(base, SCSI_CSB_IN);
nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
return TRUE;
@@ -1706,8 +1702,6 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
/* Unexpected bus free */
nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
- /* DID_ERROR? */
- //SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Status << 0);
SCpnt->result = DID_ERROR << 16;
nsp32_scsi_done(SCpnt);
return TRUE;
diff --git a/drivers/scsi/nsp32.h b/drivers/scsi/nsp32.h
index ab0726c070f7..924889f8bd37 100644
--- a/drivers/scsi/nsp32.h
+++ b/drivers/scsi/nsp32.h
@@ -534,6 +534,15 @@ typedef struct _nsp32_sync_table {
---PERIOD-- ---OFFSET-- */
#define TO_SYNCREG(period, offset) (((period) & 0x0f) << 4 | ((offset) & 0x0f))
+struct nsp32_cmd_priv {
+ enum sam_status status;
+};
+
+static inline struct nsp32_cmd_priv *nsp32_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
typedef struct _nsp32_target {
unsigned char syncreg; /* value for SYNCREG */
unsigned char ackwidth; /* value for ACKWIDTH */
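nsp32 now keeps only the SAM status per command, and the result word handed back to the midlayer is composed exactly as in the busfree hunk above: host byte in bits 16-23, legacy message byte in bits 8-15, SAM status in bits 0-7. The same composition in isolation, as a sketch:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: build scmd->result the way nsp32_busfree_occur() does. */
static void demo_set_result(struct scsi_cmnd *scmd, u8 msg,
			    enum sam_status status)
{
	scmd->result = (DID_OK << 16) | ((msg & 0xff) << 8) | (status << 0);
}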
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index df82a349e969..6a6621728c69 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -40,13 +40,16 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ioport.h>
-#include <scsi/scsi.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <scsi/scsi_ioctl.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include "aha152x.h"
#include <pcmcia/cistpl.h>
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 8b9e889bc306..48acab03a8a0 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -41,10 +41,9 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <../drivers/scsi/scsi.h>
-#include <scsi/scsi_host.h>
-
#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <pcmcia/cistpl.h>
@@ -71,6 +70,11 @@ static bool free_ports = 0;
module_param(free_ports, bool, 0);
MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))");
+static struct scsi_pointer *nsp_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static struct scsi_host_template nsp_driver_template = {
.proc_name = "nsp_cs",
.show_info = nsp_show_info,
@@ -84,6 +88,7 @@ static struct scsi_host_template nsp_driver_template = {
.this_id = NSP_INITIATOR_ID,
.sg_tablesize = SG_ALL,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
@@ -181,8 +186,9 @@ static void nsp_scsi_done(struct scsi_cmnd *SCpnt)
scsi_done(SCpnt);
}
-static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt)
+static int nsp_queuecommand_lck(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
#ifdef NSP_DEBUG
/*unsigned int host_id = SCpnt->device->host->this_id;*/
/*unsigned int base = SCpnt->device->host->io_port;*/
@@ -218,11 +224,11 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt)
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
- SCpnt->SCp.Message = 0;
- SCpnt->SCp.have_data_in = IO_UNKNOWN;
- SCpnt->SCp.sent_command = 0;
- SCpnt->SCp.phase = PH_UNDETERMINED;
+ scsi_pointer->Status = SAM_STAT_CHECK_CONDITION;
+ scsi_pointer->Message = 0;
+ scsi_pointer->have_data_in = IO_UNKNOWN;
+ scsi_pointer->sent_command = 0;
+ scsi_pointer->phase = PH_UNDETERMINED;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
/* setup scratch area
@@ -232,18 +238,18 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt)
SCp.buffers_residual : left buffers in list
SCp.phase : current state of the command */
if (scsi_bufflen(SCpnt)) {
- SCpnt->SCp.buffer = scsi_sglist(SCpnt);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
+ scsi_pointer->buffer = scsi_sglist(SCpnt);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->buffers_residual = scsi_sg_count(SCpnt) - 1;
} else {
- SCpnt->SCp.ptr = NULL;
- SCpnt->SCp.this_residual = 0;
- SCpnt->SCp.buffer = NULL;
- SCpnt->SCp.buffers_residual = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->buffers_residual = 0;
}
- if (nsphw_start_selection(SCpnt) == FALSE) {
+ if (!nsphw_start_selection(SCpnt)) {
nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "selection fail");
SCpnt->result = DID_BUS_BUSY << 16;
nsp_scsi_done(SCpnt);
@@ -263,14 +269,14 @@ static DEF_SCSI_QCMD(nsp_queuecommand)
/*
* setup PIO FIFO transfer mode and enable/disable to data out
*/
-static void nsp_setup_fifo(nsp_hw_data *data, int enabled)
+static void nsp_setup_fifo(nsp_hw_data *data, bool enabled)
{
unsigned int base = data->BaseAddress;
unsigned char transfer_mode_reg;
//nsp_dbg(NSP_DEBUG_DATA_IO, "enabled=%d", enabled);
- if (enabled != FALSE) {
+ if (enabled) {
transfer_mode_reg = TRANSFER_GO | BRAIND;
} else {
transfer_mode_reg = 0;
@@ -298,7 +304,7 @@ static void nsphw_init_sync(nsp_hw_data *data)
/*
* Initialize Ninja hardware
*/
-static int nsphw_init(nsp_hw_data *data)
+static void nsphw_init(nsp_hw_data *data)
{
unsigned int base = data->BaseAddress;
@@ -348,16 +354,15 @@ static int nsphw_init(nsp_hw_data *data)
SCSI_RESET_IRQ_EI );
nsp_write(base, IRQCONTROL, IRQCONTROL_ALLCLEAR);
- nsp_setup_fifo(data, FALSE);
-
- return TRUE;
+ nsp_setup_fifo(data, false);
}
/*
* Start selection phase
*/
-static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
+static bool nsphw_start_selection(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int host_id = SCpnt->device->host->this_id;
unsigned int base = SCpnt->device->host->io_port;
unsigned char target = scmd_id(SCpnt);
@@ -370,12 +375,12 @@ static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
phase = nsp_index_read(base, SCSIBUSMON);
if(phase != BUSMON_BUS_FREE) {
//nsp_dbg(NSP_DEBUG_RESELECTION, "bus busy");
- return FALSE;
+ return false;
}
/* start arbitration */
//nsp_dbg(NSP_DEBUG_RESELECTION, "start arbit");
- SCpnt->SCp.phase = PH_ARBSTART;
+ scsi_pointer->phase = PH_ARBSTART;
nsp_index_write(base, SETARBIT, ARBIT_GO);
time_out = 1000;
@@ -390,12 +395,12 @@ static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
if (!(arbit & ARBIT_WIN)) {
//nsp_dbg(NSP_DEBUG_RESELECTION, "arbit fail");
nsp_index_write(base, SETARBIT, ARBIT_FLAG_CLEAR);
- return FALSE;
+ return false;
}
/* assert select line */
//nsp_dbg(NSP_DEBUG_RESELECTION, "assert SEL line");
- SCpnt->SCp.phase = PH_SELSTART;
+ scsi_pointer->phase = PH_SELSTART;
udelay(3); /* wait 2.4us */
nsp_index_write(base, SCSIDATALATCH, BIT(host_id) | BIT(target));
nsp_index_write(base, SCSIBUSCTRL, SCSI_SEL | SCSI_BSY | SCSI_ATN);
@@ -409,7 +414,7 @@ static int nsphw_start_selection(struct scsi_cmnd *SCpnt)
nsp_start_timer(SCpnt, 1000/51);
data->SelectionTimeOut = 1;
- return TRUE;
+ return true;
}
struct nsp_sync_table {
@@ -479,7 +484,7 @@ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt)
sync->SyncRegister = 0;
sync->AckWidth = 0;
- return FALSE;
+ return false;
}
sync->SyncRegister = (sync_table->chip_period << SYNCREG_PERIOD_SHIFT) |
@@ -488,7 +493,7 @@ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt)
nsp_dbg(NSP_DEBUG_SYNC, "sync_reg=0x%x, ack_width=0x%x", sync->SyncRegister, sync->AckWidth);
- return TRUE;
+ return true;
}
@@ -571,8 +576,9 @@ static int nsp_expect_signal(struct scsi_cmnd *SCpnt,
/*
* transfer SCSI message
*/
-static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase)
+static int nsp_xfer(struct scsi_cmnd *const SCpnt, int phase)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
char *buf = data->MsgBuffer;
@@ -590,7 +596,7 @@ static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase)
}
/* if last byte, negate ATN */
- if (len == 1 && SCpnt->SCp.phase == PH_MSG_OUT) {
+ if (len == 1 && scsi_pointer->phase == PH_MSG_OUT) {
nsp_index_write(base, SCSIBUSCTRL, AUTODIRECTION | ACKENB);
}
@@ -611,14 +617,15 @@ static int nsp_xfer(struct scsi_cmnd *SCpnt, int phase)
/*
* get extra SCSI data from fifo
*/
-static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt)
+static int nsp_dataphase_bypass(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
unsigned int count;
//nsp_dbg(NSP_DEBUG_DATA_IO, "in");
- if (SCpnt->SCp.have_data_in != IO_IN) {
+ if (scsi_pointer->have_data_in != IO_IN) {
return 0;
}
@@ -633,9 +640,9 @@ static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt)
* data phase skip only occurs in case of SCSI_LOW_READ
*/
nsp_dbg(NSP_DEBUG_DATA_IO, "use bypass quirk");
- SCpnt->SCp.phase = PH_DATA;
+ scsi_pointer->phase = PH_DATA;
nsp_pio_read(SCpnt);
- nsp_setup_fifo(data, FALSE);
+ nsp_setup_fifo(data, false);
return 0;
}
@@ -643,7 +650,7 @@ static int nsp_dataphase_bypass(struct scsi_cmnd *SCpnt)
/*
* accept reselection
*/
-static int nsp_reselected(struct scsi_cmnd *SCpnt)
+static void nsp_reselected(struct scsi_cmnd *SCpnt)
{
unsigned int base = SCpnt->device->host->io_port;
unsigned int host_id = SCpnt->device->host->this_id;
@@ -675,8 +682,6 @@ static int nsp_reselected(struct scsi_cmnd *SCpnt)
bus_reg = nsp_index_read(base, SCSIBUSCTRL) & ~(SCSI_BSY | SCSI_ATN);
nsp_index_write(base, SCSIBUSCTRL, bus_reg);
nsp_index_write(base, SCSIBUSCTRL, bus_reg | AUTODIRECTION | ACKENB);
-
- return TRUE;
}
/*
@@ -709,8 +714,9 @@ static int nsp_fifo_count(struct scsi_cmnd *SCpnt)
/*
* read data in DATA IN phase
*/
-static void nsp_pio_read(struct scsi_cmnd *SCpnt)
+static void nsp_pio_read(struct scsi_cmnd *const SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
unsigned long mmio_base = SCpnt->device->host->base;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
@@ -721,24 +727,25 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
ocount = data->FifoCount;
nsp_dbg(NSP_DEBUG_DATA_IO, "in SCpnt=0x%p resid=%d ocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d",
- SCpnt, scsi_get_resid(SCpnt), ocount, SCpnt->SCp.ptr,
- SCpnt->SCp.this_residual, SCpnt->SCp.buffer,
- SCpnt->SCp.buffers_residual);
+ SCpnt, scsi_get_resid(SCpnt), ocount, scsi_pointer->ptr,
+ scsi_pointer->this_residual, scsi_pointer->buffer,
+ scsi_pointer->buffers_residual);
time_out = 1000;
while ((time_out-- != 0) &&
- (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0 ) ) {
+ (scsi_pointer->this_residual > 0 ||
+ scsi_pointer->buffers_residual > 0)) {
stat = nsp_index_read(base, SCSIBUSMON);
stat &= BUSMON_PHASE_MASK;
res = nsp_fifo_count(SCpnt) - ocount;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount, res);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x ocount=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount, res);
if (res == 0) { /* if some data available ? */
if (stat == BUSPHASE_DATA_IN) { /* phase changed? */
- //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", SCpnt->SCp.this_residual);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, " wait for data this=%d", scsi_pointer->this_residual);
continue;
} else {
nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x", stat);
@@ -752,20 +759,21 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
continue;
}
- res = min(res, SCpnt->SCp.this_residual);
+ res = min(res, scsi_pointer->this_residual);
switch (data->TransferMode) {
case MODE_IO32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_fifo32_read(base, SCpnt->SCp.ptr, res >> 2);
+ nsp_fifo32_read(base, scsi_pointer->ptr, res >> 2);
break;
case MODE_IO8:
- nsp_fifo8_read (base, SCpnt->SCp.ptr, res );
+ nsp_fifo8_read(base, scsi_pointer->ptr, res);
break;
case MODE_MEM32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_mmio_fifo32_read(mmio_base, SCpnt->SCp.ptr, res >> 2);
+ nsp_mmio_fifo32_read(mmio_base, scsi_pointer->ptr,
+ res >> 2);
break;
default:
@@ -774,22 +782,23 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
}
nsp_inc_resid(SCpnt, -res);
- SCpnt->SCp.ptr += res;
- SCpnt->SCp.this_residual -= res;
+ scsi_pointer->ptr += res;
+ scsi_pointer->this_residual -= res;
ocount += res;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, ocount);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this_residual=0x%x ocount=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, ocount);
/* go to next scatter list if available */
- if (SCpnt->SCp.this_residual == 0 &&
- SCpnt->SCp.buffers_residual != 0 ) {
+ if (scsi_pointer->this_residual == 0 &&
+ scsi_pointer->buffers_residual != 0) {
//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next timeout=%d", time_out);
- SCpnt->SCp.buffers_residual--;
- SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ scsi_pointer->buffers_residual--;
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
time_out = 1000;
- //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", SCpnt->SCp.buffer->page, SCpnt->SCp.buffer->offset);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "page: 0x%p, off: 0x%x", scsi_pointer->buffer->page, scsi_pointer->buffer->offset);
}
}
@@ -797,8 +806,8 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
if (time_out < 0) {
nsp_msg(KERN_DEBUG, "pio read timeout resid=%d this_residual=%d buffers_residual=%d",
- scsi_get_resid(SCpnt), SCpnt->SCp.this_residual,
- SCpnt->SCp.buffers_residual);
+ scsi_get_resid(SCpnt), scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual);
}
nsp_dbg(NSP_DEBUG_DATA_IO, "read ocount=0x%x", ocount);
nsp_dbg(NSP_DEBUG_DATA_IO, "r cmd=%d resid=0x%x\n", data->CmdId,
@@ -810,6 +819,7 @@ static void nsp_pio_read(struct scsi_cmnd *SCpnt)
*/
static void nsp_pio_write(struct scsi_cmnd *SCpnt)
{
+ struct scsi_pointer *scsi_pointer = nsp_priv(SCpnt);
unsigned int base = SCpnt->device->host->io_port;
unsigned long mmio_base = SCpnt->device->host->base;
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
@@ -820,14 +830,15 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
ocount = data->FifoCount;
nsp_dbg(NSP_DEBUG_DATA_IO, "in fifocount=%d ptr=0x%p this_residual=%d buffers=0x%p nbuf=%d resid=0x%x",
- data->FifoCount, SCpnt->SCp.ptr, SCpnt->SCp.this_residual,
- SCpnt->SCp.buffer, SCpnt->SCp.buffers_residual,
+ data->FifoCount, scsi_pointer->ptr, scsi_pointer->this_residual,
+ scsi_pointer->buffer, scsi_pointer->buffers_residual,
scsi_get_resid(SCpnt));
time_out = 1000;
while ((time_out-- != 0) &&
- (SCpnt->SCp.this_residual > 0 || SCpnt->SCp.buffers_residual > 0)) {
+ (scsi_pointer->this_residual > 0 ||
+ scsi_pointer->buffers_residual > 0)) {
stat = nsp_index_read(base, SCSIBUSMON);
stat &= BUSMON_PHASE_MASK;
@@ -837,9 +848,9 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
nsp_dbg(NSP_DEBUG_DATA_IO, "phase changed stat=0x%x, res=%d\n", stat, res);
/* Put back pointer */
nsp_inc_resid(SCpnt, res);
- SCpnt->SCp.ptr -= res;
- SCpnt->SCp.this_residual += res;
- ocount -= res;
+ scsi_pointer->ptr -= res;
+ scsi_pointer->this_residual += res;
+ ocount -= res;
break;
}
@@ -850,21 +861,22 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
continue;
}
- res = min(SCpnt->SCp.this_residual, WFIFO_CRIT);
+ res = min(scsi_pointer->this_residual, WFIFO_CRIT);
- //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", SCpnt->SCp.ptr, SCpnt->SCp.this_residual, res);
+ //nsp_dbg(NSP_DEBUG_DATA_IO, "ptr=0x%p this=0x%x res=0x%x", scsi_pointer->ptr, scsi_pointer->this_residual, res);
switch (data->TransferMode) {
case MODE_IO32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_fifo32_write(base, SCpnt->SCp.ptr, res >> 2);
+ nsp_fifo32_write(base, scsi_pointer->ptr, res >> 2);
break;
case MODE_IO8:
- nsp_fifo8_write (base, SCpnt->SCp.ptr, res );
+ nsp_fifo8_write(base, scsi_pointer->ptr, res);
break;
case MODE_MEM32:
res &= ~(BIT(1)|BIT(0)); /* align 4 */
- nsp_mmio_fifo32_write(mmio_base, SCpnt->SCp.ptr, res >> 2);
+ nsp_mmio_fifo32_write(mmio_base, scsi_pointer->ptr,
+ res >> 2);
break;
default:
@@ -873,18 +885,19 @@ static void nsp_pio_write(struct scsi_cmnd *SCpnt)
}
nsp_inc_resid(SCpnt, -res);
- SCpnt->SCp.ptr += res;
- SCpnt->SCp.this_residual -= res;
- ocount += res;
+ scsi_pointer->ptr += res;
+ scsi_pointer->this_residual -= res;
+ ocount += res;
/* go to next scatter list if available */
- if (SCpnt->SCp.this_residual == 0 &&
- SCpnt->SCp.buffers_residual != 0 ) {
+ if (scsi_pointer->this_residual == 0 &&
+ scsi_pointer->buffers_residual != 0) {
//nsp_dbg(NSP_DEBUG_DATA_IO, "scatterlist next");
- SCpnt->SCp.buffers_residual--;
- SCpnt->SCp.buffer = sg_next(SCpnt->SCp.buffer);
- SCpnt->SCp.ptr = BUFFER_ADDR;
- SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
+ scsi_pointer->buffers_residual--;
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ scsi_pointer->ptr = BUFFER_ADDR(SCpnt);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
time_out = 1000;
}
}
@@ -931,7 +944,7 @@ static int nsp_nexus(struct scsi_cmnd *SCpnt)
}
/* setup pdma fifo */
- nsp_setup_fifo(data, TRUE);
+ nsp_setup_fifo(data, true);
/* clear ack counter */
data->FifoCount = 0;
@@ -952,6 +965,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
unsigned int base;
unsigned char irq_status, irq_phase, phase;
struct scsi_cmnd *tmpSC;
+ struct scsi_pointer *scsi_pointer;
unsigned char target, lun;
unsigned int *sync_neg;
int i, tmp;
@@ -1030,9 +1044,10 @@ static irqreturn_t nspintr(int irq, void *dev_id)
if(data->CurrentSC != NULL) {
tmpSC = data->CurrentSC;
- tmpSC->result = (DID_RESET << 16) |
- ((tmpSC->SCp.Message & 0xff) << 8) |
- ((tmpSC->SCp.Status & 0xff) << 0);
+ scsi_pointer = nsp_priv(tmpSC);
+ tmpSC->result = (DID_RESET << 16) |
+ ((scsi_pointer->Message & 0xff) << 8) |
+ ((scsi_pointer->Status & 0xff) << 0);
nsp_scsi_done(tmpSC);
}
return IRQ_HANDLED;
@@ -1046,6 +1061,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
}
tmpSC = data->CurrentSC;
+ scsi_pointer = nsp_priv(tmpSC);
target = tmpSC->device->id;
lun = tmpSC->device->lun;
sync_neg = &(data->Sync[target].SyncNegotiation);
@@ -1057,9 +1073,8 @@ static irqreturn_t nspintr(int irq, void *dev_id)
if (irq_phase & RESELECT_IRQ) {
nsp_dbg(NSP_DEBUG_INTR, "reselect");
nsp_write(base, IRQCONTROL, IRQCONTROL_RESELECT_CLEAR);
- if (nsp_reselected(tmpSC) != FALSE) {
- return IRQ_HANDLED;
- }
+ nsp_reselected(tmpSC);
+ return IRQ_HANDLED;
}
if ((irq_phase & (PHASE_CHANGE_IRQ | LATCHED_BUS_FREE)) == 0) {
@@ -1069,7 +1084,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
//show_phase(tmpSC);
- switch(tmpSC->SCp.phase) {
+ switch (scsi_pointer->phase) {
case PH_SELSTART:
// *sync_neg = SYNC_NOT_YET;
if ((phase & BUSMON_BSY) == 0) {
@@ -1092,7 +1107,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
/* attention assert */
//nsp_dbg(NSP_DEBUG_INTR, "attention assert");
data->SelectionTimeOut = 0;
- tmpSC->SCp.phase = PH_SELECTED;
+ scsi_pointer->phase = PH_SELECTED;
nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN);
udelay(1);
nsp_index_write(base, SCSIBUSCTRL, SCSI_ATN | AUTODIRECTION | ACKENB);
@@ -1121,17 +1136,18 @@ static irqreturn_t nspintr(int irq, void *dev_id)
//nsp_dbg(NSP_DEBUG_INTR, "start scsi seq");
/* normal disconnect */
- if (((tmpSC->SCp.phase == PH_MSG_IN) || (tmpSC->SCp.phase == PH_MSG_OUT)) &&
- (irq_phase & LATCHED_BUS_FREE) != 0 ) {
+ if ((scsi_pointer->phase == PH_MSG_IN ||
+ scsi_pointer->phase == PH_MSG_OUT) &&
+ (irq_phase & LATCHED_BUS_FREE) != 0) {
nsp_dbg(NSP_DEBUG_INTR, "normal disconnect irq_status=0x%x, phase=0x%x, irq_phase=0x%x", irq_status, phase, irq_phase);
//*sync_neg = SYNC_NOT_YET;
/* all command complete and return status */
- if (tmpSC->SCp.Message == COMMAND_COMPLETE) {
- tmpSC->result = (DID_OK << 16) |
- ((tmpSC->SCp.Message & 0xff) << 8) |
- ((tmpSC->SCp.Status & 0xff) << 0);
+ if (scsi_pointer->Message == COMMAND_COMPLETE) {
+ tmpSC->result = (DID_OK << 16) |
+ ((scsi_pointer->Message & 0xff) << 8) |
+ ((scsi_pointer->Status & 0xff) << 0);
nsp_dbg(NSP_DEBUG_INTR, "command complete result=0x%x", tmpSC->result);
nsp_scsi_done(tmpSC);
@@ -1160,7 +1176,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
return IRQ_HANDLED;
}
- tmpSC->SCp.phase = PH_COMMAND;
+ scsi_pointer->phase = PH_COMMAND;
nsp_nexus(tmpSC);
@@ -1176,8 +1192,8 @@ static irqreturn_t nspintr(int irq, void *dev_id)
case BUSPHASE_DATA_OUT:
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_OUT");
- tmpSC->SCp.phase = PH_DATA;
- tmpSC->SCp.have_data_in = IO_OUT;
+ scsi_pointer->phase = PH_DATA;
+ scsi_pointer->have_data_in = IO_OUT;
nsp_pio_write(tmpSC);
@@ -1186,8 +1202,8 @@ static irqreturn_t nspintr(int irq, void *dev_id)
case BUSPHASE_DATA_IN:
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_DATA_IN");
- tmpSC->SCp.phase = PH_DATA;
- tmpSC->SCp.have_data_in = IO_IN;
+ scsi_pointer->phase = PH_DATA;
+ scsi_pointer->have_data_in = IO_IN;
nsp_pio_read(tmpSC);
@@ -1197,10 +1213,11 @@ static irqreturn_t nspintr(int irq, void *dev_id)
nsp_dataphase_bypass(tmpSC);
nsp_dbg(NSP_DEBUG_INTR, "BUSPHASE_STATUS");
- tmpSC->SCp.phase = PH_STATUS;
+ scsi_pointer->phase = PH_STATUS;
- tmpSC->SCp.Status = nsp_index_read(base, SCSIDATAWITHACK);
- nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x", tmpSC->SCp.Message, tmpSC->SCp.Status);
+ scsi_pointer->Status = nsp_index_read(base, SCSIDATAWITHACK);
+ nsp_dbg(NSP_DEBUG_INTR, "message=0x%x status=0x%x",
+ scsi_pointer->Message, scsi_pointer->Status);
break;
@@ -1210,12 +1227,12 @@ static irqreturn_t nspintr(int irq, void *dev_id)
goto timer_out;
}
- tmpSC->SCp.phase = PH_MSG_OUT;
+ scsi_pointer->phase = PH_MSG_OUT;
//*sync_neg = SYNC_NOT_YET;
data->MsgLen = i = 0;
- data->MsgBuffer[i] = IDENTIFY(TRUE, lun); i++;
+ data->MsgBuffer[i] = IDENTIFY(true, lun); i++;
if (*sync_neg == SYNC_NOT_YET) {
data->Sync[target].SyncPeriod = 0;
@@ -1243,7 +1260,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
goto timer_out;
}
- tmpSC->SCp.phase = PH_MSG_IN;
+ scsi_pointer->phase = PH_MSG_IN;
nsp_message_in(tmpSC);
/**/
@@ -1275,9 +1292,10 @@ static irqreturn_t nspintr(int irq, void *dev_id)
i += (1 + data->MsgBuffer[i+1]);
}
}
- tmpSC->SCp.Message = tmp;
+ scsi_pointer->Message = tmp;
- nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d", tmpSC->SCp.Message, data->MsgLen);
+ nsp_dbg(NSP_DEBUG_INTR, "message=0x%x len=%d",
+ scsi_pointer->Message, data->MsgLen);
show_message(data);
break;
@@ -1557,6 +1575,9 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
data->MmioAddress = (unsigned long)
ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
+ if (!data->MmioAddress)
+ goto next_entry;
+
data->MmioLength = resource_size(p_dev->resource[2]);
}
/* If we got this far, we're cool! */
@@ -1611,9 +1632,7 @@ static int nsp_cs_config(struct pcmcia_device *link)
nsp_dbg(NSP_DEBUG_INIT, "I/O[0x%x+0x%x] IRQ %d",
data->BaseAddress, data->NumAddress, data->IrqNumber);
- if(nsphw_init(data) == FALSE) {
- goto cs_failed;
- }
+ nsphw_init(data);
host = nsp_detect(&nsp_driver_template);
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index 665bf8d0faf7..e1ee8ef90ad3 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -304,8 +304,8 @@ static int nsp_eh_host_reset (struct scsi_cmnd *SCpnt);
static int nsp_bus_reset (nsp_hw_data *data);
/* */
-static int nsphw_init (nsp_hw_data *data);
-static int nsphw_start_selection(struct scsi_cmnd *SCpnt);
+static void nsphw_init (nsp_hw_data *data);
+static bool nsphw_start_selection(struct scsi_cmnd *SCpnt);
static void nsp_start_timer (struct scsi_cmnd *SCpnt, int time);
static int nsp_fifo_count (struct scsi_cmnd *SCpnt);
static void nsp_pio_read (struct scsi_cmnd *SCpnt);
@@ -320,7 +320,7 @@ static int nsp_expect_signal (struct scsi_cmnd *SCpnt,
unsigned char mask);
static int nsp_xfer (struct scsi_cmnd *SCpnt, int phase);
static int nsp_dataphase_bypass (struct scsi_cmnd *SCpnt);
-static int nsp_reselected (struct scsi_cmnd *SCpnt);
+static void nsp_reselected (struct scsi_cmnd *SCpnt);
static struct Scsi_Host *nsp_detect(struct scsi_host_template *sht);
/* Interrupt handler */
@@ -371,7 +371,7 @@ enum _burst_mode {
};
/* scatter-gather table */
-# define BUFFER_ADDR ((char *)((sg_virt(SCpnt->SCp.buffer))))
+#define BUFFER_ADDR(SCpnt) ((char *)(sg_virt(nsp_priv(SCpnt)->buffer)))
#endif /*__nsp_cs__*/
/* end */
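BUFFER_ADDR now takes the command explicitly because the scatter-gather cursor lives in the per-command scsi_pointer rather than in the removed SCpnt->SCp. The segment advance that nsp_pio_read() and nsp_pio_write() perform is the standard scatterlist walk; a minimal sketch, assuming (as this PIO driver does) that the segments are kernel-mapped so sg_virt() is valid:

#include <linux/scatterlist.h>

/*
 * Sketch: advance a (buffer, ptr, this_residual, buffers_residual)
 * cursor to the next scatterlist segment once the current one drains.
 */
static void demo_next_segment(struct scatterlist **sg, char **ptr,
			      int *bytes_left, int *segs_left)
{
	if (*bytes_left == 0 && *segs_left != 0) {
		(*segs_left)--;
		*sg = sg_next(*sg);	/* next segment in the list */
		*ptr = sg_virt(*sg);	/* PIO needs a virtual address */
		*bytes_left = (*sg)->length;
	}
}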
diff --git a/drivers/scsi/pcmcia/nsp_debug.c b/drivers/scsi/pcmcia/nsp_debug.c
index 6aa7d269d3b3..23b68dd26f74 100644
--- a/drivers/scsi/pcmcia/nsp_debug.c
+++ b/drivers/scsi/pcmcia/nsp_debug.c
@@ -145,7 +145,7 @@ static void show_command(struct scsi_cmnd *SCpnt)
static void show_phase(struct scsi_cmnd *SCpnt)
{
- int i = SCpnt->SCp.phase;
+ int i = nsp_priv(SCpnt)->phase;
char *ph[] = {
"PH_UNDETERMINED",
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 828d53faf09a..310d0b6586a6 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -38,14 +38,17 @@
#include <linux/string.h>
#include <linux/ioport.h>
#include <asm/io.h>
-#include <scsi/scsi.h>
#include <linux/major.h>
#include <linux/blkdev.h>
-#include <scsi/scsi_ioctl.h>
#include <linux/interrupt.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include "../qlogicfas408.h"
#include <pcmcia/cistpl.h>
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index fc93d2a57e1e..5d7dfefd6f6c 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -192,6 +192,12 @@ struct sym53c500_data {
int fast_pio;
};
+struct sym53c500_cmd_priv {
+ int status;
+ int message;
+ int phase;
+};
+
enum Phase {
idle,
data_out,
@@ -351,6 +357,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct sym53c500_data *data =
(struct sym53c500_data *)dev->hostdata;
struct scsi_cmnd *curSC = data->current_SC;
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC);
int fast_pio = data->fast_pio;
spin_lock_irqsave(dev->host_lock, flags);
@@ -397,11 +404,11 @@ SYM53C500_intr(int irq, void *dev_id)
if (int_reg & 0x20) { /* Disconnect */
DEB(printk("SYM53C500: disconnect intr received\n"));
- if (curSC->SCp.phase != message_in) { /* Unexpected disconnect */
+ if (scp->phase != message_in) { /* Unexpected disconnect */
curSC->result = DID_NO_CONNECT << 16;
} else { /* Command complete, return status and message */
- curSC->result = (curSC->SCp.Status & 0xff)
- | ((curSC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+ curSC->result = (scp->status & 0xff) |
+ ((scp->message & 0xff) << 8) | (DID_OK << 16);
}
goto idle_out;
}
@@ -412,7 +419,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- curSC->SCp.phase = data_out;
+ scp->phase = data_out;
VDEB(printk("SYM53C500: Data-Out phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -431,7 +438,7 @@ SYM53C500_intr(int irq, void *dev_id)
struct scatterlist *sg;
int i;
- curSC->SCp.phase = data_in;
+ scp->phase = data_in;
VDEB(printk("SYM53C500: Data-In phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -446,12 +453,12 @@ SYM53C500_intr(int irq, void *dev_id)
break;
case 0x02: /* COMMAND */
- curSC->SCp.phase = command_ph;
+ scp->phase = command_ph;
printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
break;
case 0x03: /* STATUS */
- curSC->SCp.phase = status_ph;
+ scp->phase = status_ph;
VDEB(printk("SYM53C500: Status phase\n"));
outb(FLUSH_FIFO, port_base + CMD_REG);
outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
@@ -464,22 +471,22 @@ SYM53C500_intr(int irq, void *dev_id)
case 0x06: /* MESSAGE-OUT */
DEB(printk("SYM53C500: Message-Out phase\n"));
- curSC->SCp.phase = message_out;
+ scp->phase = message_out;
outb(SET_ATN, port_base + CMD_REG); /* Reject the message */
outb(MSG_ACCEPT, port_base + CMD_REG);
break;
case 0x07: /* MESSAGE-IN */
VDEB(printk("SYM53C500: Message-In phase\n"));
- curSC->SCp.phase = message_in;
+ scp->phase = message_in;
- curSC->SCp.Status = inb(port_base + SCSI_FIFO);
- curSC->SCp.Message = inb(port_base + SCSI_FIFO);
+ scp->status = inb(port_base + SCSI_FIFO);
+ scp->message = inb(port_base + SCSI_FIFO);
VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
- DEB(printk("Status = %02x Message = %02x\n", curSC->SCp.Status, curSC->SCp.Message));
+ DEB(printk("Status = %02x Message = %02x\n", scp->status, scp->message));
- if (curSC->SCp.Message == SAVE_POINTERS || curSC->SCp.Message == DISCONNECT) {
+ if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) {
outb(SET_ATN, port_base + CMD_REG); /* Reject message */
DEB(printk("Discarding SAVE_POINTERS message\n"));
}
@@ -491,7 +498,7 @@ out:
return IRQ_HANDLED;
idle_out:
- curSC->SCp.phase = idle;
+ scp->phase = idle;
scsi_done(curSC);
goto out;
}
@@ -539,6 +546,7 @@ SYM53C500_info(struct Scsi_Host *SChost)
static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
{
+ struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt);
int i;
int port_base = SCpnt->device->host->io_port;
struct sym53c500_data *data =
@@ -555,9 +563,9 @@ static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
VDEB(printk("\n"));
data->current_SC = SCpnt;
- data->current_SC->SCp.phase = command_ph;
- data->current_SC->SCp.Status = 0;
- data->current_SC->SCp.Message = 0;
+ scp->phase = command_ph;
+ scp->status = 0;
+ scp->message = 0;
/* We are locked here already by the mid layer */
REG0(port_base);
@@ -671,7 +679,8 @@ static struct scsi_host_template sym53c500_driver_template = {
.can_queue = 1,
.this_id = 7,
.sg_tablesize = 32,
- .shost_groups = SYM53C500_shost_groups
+ .shost_groups = SYM53C500_shost_groups,
+ .cmd_size = sizeof(struct sym53c500_cmd_priv),
};
static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
index 02b7338999cc..bbb51b7312f1 100644
--- a/drivers/scsi/pm8001/Makefile
+++ b/drivers/scsi/pm8001/Makefile
@@ -6,9 +6,12 @@
obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+
+CFLAGS_pm80xx_tracepoints.o := -I$(src)
+
pm80xx-y += pm8001_init.o \
pm8001_sas.o \
pm8001_ctl.o \
pm8001_hwi.o \
- pm80xx_hwi.o
-
+ pm80xx_hwi.o \
+ pm80xx_tracepoints.o
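The CFLAGS_pm80xx_tracepoints.o := -I$(src) line is the standard kbuild recipe for driver-local tracepoints: the one object that defines CREATE_TRACE_POINTS must be able to re-include its own trace header by file name. The pm80xx header itself is not shown in this excerpt; it follows the usual TRACE_EVENT boilerplate, sketched here with illustrative names only:

/* demo_trace.h -- illustrative; not the real pm80xx_tracepoints.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_DEMO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _DEMO_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_mpi_build_cmd,
	TP_PROTO(u32 id, u32 opc, u32 htag),
	TP_ARGS(id, opc, htag),
	TP_STRUCT__entry(
		__field(u32, id)
		__field(u32, opc)
		__field(u32, htag)
	),
	TP_fast_assign(
		__entry->id   = id;
		__entry->opc  = opc;
		__entry->htag = htag;
	),
	TP_printk("ctlr=%u opc=0x%x htag=0x%x",
		  __entry->id, __entry->opc, __entry->htag)
);

#endif /* _DEMO_TRACE_H */

/* This tail is why -I$(src) is needed: define_trace.h re-includes the
 * header by TRACE_INCLUDE_FILE relative to the include path. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE demo_trace
#include <trace/define_trace.h>

The matching .c unit then contains only #define CREATE_TRACE_POINTS followed by #include "demo_trace.h".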
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 397eb9f6a1dd..73f036bed128 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -721,12 +721,15 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
DECLARE_COMPLETION_ONSTACK(completion);
u8 *ioctlbuffer;
struct fw_control_info *fwControl;
- u32 partitionSize, partitionSizeTmp;
+ __be32 partitionSizeTmp;
+ u32 partitionSize;
u32 loopNumber, loopcount;
struct pm8001_fw_image_header *image_hdr;
u32 sizeRead = 0;
u32 ret = 0;
u32 length = 1024 * 16 + sizeof(*payload) - 1;
+ u32 fc_len;
+ u8 *read_buf;
if (pm8001_ha->fw_image->size < 28) {
pm8001_ha->fw_status = FAIL_FILE_SIZE;
@@ -740,7 +743,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
while (sizeRead < pm8001_ha->fw_image->size) {
partitionSizeTmp =
- *(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
+ *(__be32 *)((u8 *)&image_hdr->image_length + sizeRead);
partitionSize = be32_to_cpu(partitionSizeTmp);
loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN,
IOCTL_BUF_SIZE);
@@ -755,36 +758,35 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
fwControl->retcode = 0;/* OUT */
fwControl->offset = loopNumber * IOCTL_BUF_SIZE;/*OUT */
- /* for the last chunk of data in case file size is not even with
- 4k, load only the rest*/
- if (((loopcount-loopNumber) == 1) &&
- ((partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE)) {
- fwControl->len =
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
- memcpy((u8 *)fwControl->buffer,
- (u8 *)pm8001_ha->fw_image->data + sizeRead,
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE);
- sizeRead +=
- (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
- } else {
- memcpy((u8 *)fwControl->buffer,
- (u8 *)pm8001_ha->fw_image->data + sizeRead,
- IOCTL_BUF_SIZE);
- sizeRead += IOCTL_BUF_SIZE;
- }
-
- pm8001_ha->nvmd_completion = &completion;
- ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
- if (ret) {
- pm8001_ha->fw_status = FAIL_OUT_MEMORY;
- goto out;
- }
- wait_for_completion(&completion);
- if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
- pm8001_ha->fw_status = fwControl->retcode;
- ret = -EFAULT;
- goto out;
- }
+ /*
+ * For the last chunk of data, in case the file size is
+ * not a multiple of 4k, load only the rest.
+ */
+
+ read_buf = (u8 *)pm8001_ha->fw_image->data + sizeRead;
+ fc_len = (partitionSize + HEADER_LEN) % IOCTL_BUF_SIZE;
+
+ if (loopcount - loopNumber == 1 && fc_len) {
+ fwControl->len = fc_len;
+ memcpy((u8 *)fwControl->buffer, read_buf, fc_len);
+ sizeRead += fc_len;
+ } else {
+ memcpy((u8 *)fwControl->buffer, read_buf, IOCTL_BUF_SIZE);
+ sizeRead += IOCTL_BUF_SIZE;
+ }
+
+ pm8001_ha->nvmd_completion = &completion;
+ ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
+ if (ret) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ goto out;
+ }
+ wait_for_completion(&completion);
+ if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
+ pm8001_ha->fw_status = fwControl->retcode;
+ ret = -EFAULT;
+ goto out;
+ }
}
}
out:
@@ -889,14 +891,6 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
-/**
- * ctl_mpi_state_show - controller MPI state check
- * @cdev: pointer to embedded class device
- * @buf: the buffer returned
- *
- * A sysfs 'read-only' shost attribute.
- */
-
static const char *const mpiStateText[] = {
"MPI is not initialized",
"MPI is successfully initialized",
@@ -904,6 +898,14 @@ static const char *const mpiStateText[] = {
"MPI initialization failed with error in [31:16]"
};
+/**
+ * ctl_mpi_state_show - controller MPI state check
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
static ssize_t ctl_mpi_state_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -920,11 +922,11 @@ static DEVICE_ATTR_RO(ctl_mpi_state);
/**
* ctl_hmi_error_show - controller MPI initialization fails
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_hmi_error_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -941,11 +943,11 @@ static DEVICE_ATTR_RO(ctl_hmi_error);
/**
* ctl_raae_count_show - controller raae count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_raae_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -962,11 +964,11 @@ static DEVICE_ATTR_RO(ctl_raae_count);
/**
* ctl_iop0_count_show - controller iop0 count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_iop0_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -983,11 +985,11 @@ static DEVICE_ATTR_RO(ctl_iop0_count);
/**
* ctl_iop1_count_show - controller iop1 count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_iop1_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
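The __be32 change in pm8001_update_flash() above is an endianness annotation rather than a behavior change: the firmware image header stores the partition length big-endian, so the field must be loaded through a __be32 and converted with be32_to_cpu(), which also lets sparse verify the conversion. The same read in isolation, as a sketch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: pull a big-endian u32 field out of a firmware image buffer. */
static u32 demo_partition_size(const u8 *image, size_t offset)
{
	__be32 raw = *(const __be32 *)(image + offset);	/* wire order */

	return be32_to_cpu(raw);			/* CPU order */
}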
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 124cb69740c6..628b08ba6770 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -42,6 +42,7 @@
#include "pm8001_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
+#include "pm80xx_tracepoints.h"
/**
* read_main_config_table - read the configure table and save it.
@@ -698,6 +699,10 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
return 0;
}
+static void pm8001_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
+}
+
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
u32 max_wait_count;
@@ -1308,24 +1313,29 @@ int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
* pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to
* FW to tell the fw to get this message from IOMB.
* @pm8001_ha: our hba card information
- * @circularQ: the inbound queue we want to transfer to HBA.
+ * @q_index: the index in the inbound queue we want to transfer to HBA.
* @opCode: the operation code represents commands which LLDD and fw recognized.
* @payload: the command payload of each operation command.
* @nb: size in bytes of the command payload
* @responseQueue: queue to interrupt on w/ command response (if any)
*/
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
- struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, size_t nb,
+ u32 q_index, u32 opCode, void *payload, size_t nb,
u32 responseQueue)
{
u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
void *pMessage;
unsigned long flags;
- int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
+ struct inbound_queue_table *circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
int rv;
+ u32 htag = le32_to_cpu(*(__le32 *)payload);
+
+ trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index,
+ circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index));
+
+ if (WARN_ON(q_index >= pm8001_ha->max_q_num))
+ return -EINVAL;
- WARN_ON(q_index >= PM8001_MAX_INB_NUM);
spin_lock_irqsave(&circularQ->iq_lock, flags);
rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
&pMessage);
@@ -1515,7 +1525,6 @@ void pm8001_work_fn(struct work_struct *work)
case IO_XFER_ERROR_BREAK:
{ /* This one stashes the sas_task instead */
struct sas_task *t = (struct sas_task *)pm8001_dev;
- u32 tag;
struct pm8001_ccb_info *ccb;
struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
unsigned long flags, flags1;
@@ -1537,8 +1546,8 @@ void pm8001_work_fn(struct work_struct *work)
/* Search for a possible ccb that matches the task */
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
- tag = ccb->ccb_tag;
- if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ if ((ccb->ccb_tag != PM8001_INVALID_TAG) &&
+ (ccb->task == t))
break;
}
if (!ccb) {
@@ -1554,17 +1563,16 @@ void pm8001_work_fn(struct work_struct *work)
atomic_dec(&pm8001_dev->running_req);
spin_lock_irqsave(&t->task_state_lock, flags1);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, pw->handler, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags1);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
t->task_done(t);
@@ -1573,7 +1581,6 @@ void pm8001_work_fn(struct work_struct *work)
case IO_XFER_OPEN_RETRY_TIMEOUT:
{ /* This one stashes the sas_task instead */
struct sas_task *t = (struct sas_task *)pm8001_dev;
- u32 tag;
struct pm8001_ccb_info *ccb;
struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
unsigned long flags, flags1;
@@ -1607,8 +1614,8 @@ void pm8001_work_fn(struct work_struct *work)
/* Search for a possible ccb that matches the task */
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
- tag = ccb->ccb_tag;
- if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+ if ((ccb->ccb_tag != PM8001_INVALID_TAG) &&
+ (ccb->task == t))
break;
}
if (!ccb) {
@@ -1679,19 +1686,13 @@ void pm8001_work_fn(struct work_struct *work)
struct task_status_struct *ts;
struct sas_task *task;
int i;
- u32 tag, device_id;
+ u32 device_id;
for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
ccb = &pm8001_ha->ccb_info[i];
task = ccb->task;
ts = &task->task_status;
- tag = ccb->ccb_tag;
- /* check if tag is NULL */
- if (!tag) {
- pm8001_dbg(pm8001_ha, FAIL,
- "tag Null\n");
- continue;
- }
+
if (task != NULL) {
dev = task->dev;
if (!dev) {
@@ -1700,10 +1701,10 @@ void pm8001_work_fn(struct work_struct *work)
continue;
}
/* complete sas task and update to top layer */
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
ts->resp = SAS_TASK_COMPLETE;
task->task_done(task);
- } else if (tag != 0xFFFFFFFF) {
+ } else if (ccb->ccb_tag != PM8001_INVALID_TAG) {
/* complete the internal commands/non-sas task */
pm8001_dev = ccb->device;
if (pm8001_dev->dcompletion) {
@@ -1711,7 +1712,7 @@ void pm8001_work_fn(struct work_struct *work)
pm8001_dev->dcompletion = NULL;
}
complete(pm8001_ha->nvmd_completion);
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
}
/* Deregister all the device ids */
@@ -1750,22 +1751,16 @@ int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev)
{
- int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
+ struct sas_task *task;
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_ABORT;
int ret;
- if (!pm8001_ha_dev) {
- pm8001_dbg(pm8001_ha, FAIL, "dev is null\n");
- return;
- }
+ pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG;
+ pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
@@ -1773,27 +1768,23 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res)
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
return;
-
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
-
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ }
memset(&task_abort, 0, sizeof(task_abort));
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- task_abort.tag = cpu_to_le32(ccb_tag);
-
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
- if (ret)
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ task_abort.tag = cpu_to_le32(ccb->ccb_tag);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
+ if (ret) {
+ sas_free_task(task);
+ pm8001_ccb_free(pm8001_ha, ccb);
+ }
}
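The rewrite of pm8001_send_abort_all() above shows the ccb helper pattern used for the rest of this file: pm8001_ccb_alloc() reserves a tag and binds device and task to the ccb in one step, and pm8001_ccb_free() (or pm8001_ccb_task_free() at completion) undoes it, so no raw 0xFFFFFFFF tags are juggled by hand. The resulting internal-command shape, as a sketch using the driver's own types:

/*
 * Sketch of the converted internal-command path inside this driver
 * (pm8001 internal types; error unwinding as in the functions above).
 */
static void demo_send_internal(struct pm8001_hba_info *pm8001_ha,
			       struct pm8001_device *dev,
			       struct sas_task *task, u32 opc)
{
	struct task_abort_req cmd = {};
	struct pm8001_ccb_info *ccb;

	/* one call reserves a tag and binds device + task to the ccb */
	ccb = pm8001_ccb_alloc(pm8001_ha, dev, task);
	if (!ccb) {
		sas_free_task(task);
		return;
	}

	cmd.tag = cpu_to_le32(ccb->ccb_tag);
	cmd.device_id = cpu_to_le32(dev->device_id);

	/* callers now pass the inbound queue index, not a queue pointer */
	if (pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &cmd, sizeof(cmd), 0)) {
		sas_free_task(task);
		pm8001_ccb_free(pm8001_ha, ccb);	/* returns the tag */
	}
}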
static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
@@ -1801,36 +1792,26 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
{
struct sata_start_req sata_cmd;
int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
struct sas_task *task = NULL;
struct host_to_dev_fis fis;
struct domain_device *dev;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
- sas_free_task(task);
- pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
- return;
- }
-
- /* allocate domain device by ourselves as libsas
- * is not going to provide any
- */
+ /*
+ * Allocate domain device by ourselves as libsas is not going to
+ * provide any.
+ */
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
pm8001_dbg(pm8001_ha, FAIL,
"Domain device cannot be allocated\n");
return;
@@ -1838,16 +1819,16 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
task->dev = dev;
task->dev->lldd_dev = pm8001_ha_dev;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
+ kfree(dev);
+ return;
+ }
+
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
- memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
-
/* construct read log FIS */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.fis_type = 0x27;
@@ -1856,16 +1837,17 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
fis.lbal = 0x10;
fis.sector_count = 0x1;
- sata_cmd.tag = cpu_to_le32(ccb_tag);
+ memset(&sata_cmd, 0, sizeof(sata_cmd));
+ sata_cmd.tag = cpu_to_le32(ccb->ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+ sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
if (res) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
kfree(dev);
}
}
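[Editor's note] pm8001_send_read_log() and pm8001_send_abort_all() drive the NCQ error-recovery handshake, and this series moves the flag bookkeeping to the sender side (the two id updates at the top of pm8001_send_abort_all()). The bit values below are assumptions, but they are consistent with the magic masks the series deletes (0x7FFFFFFF cleared the read-log bit, 0xBFFFFFFF the abort-all bit):

/* assumed, from pm8001_sas.h; top bits of pm8001_device.id carry state */
#define NCQ_READ_LOG_FLAG	0x80000000	/* READ LOG EXT outstanding */
#define NCQ_ABORT_ALL_FLAG	0x40000000	/* abort-all outstanding */
#define NCQ_2ND_RLE_FLAG	0x20000000	/* second read-log permitted */

/*
 * Recovery flow, as wired up in this file:
 *   NCQ error      -> pm8001_send_read_log():  set READ_LOG | 2ND_RLE
 *   read-log done  -> mpi_sata_completion():   sees READ_LOG, calls
 *                     pm8001_send_abort_all(): set ABORT_ALL, clear READ_LOG
 *   abort-all done -> pm8001_mpi_task_abort_resp(): clear ABORT_ALL
 */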
@@ -2098,16 +2080,15 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
@@ -2266,16 +2247,15 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
@@ -2304,21 +2284,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
+ param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
- if (!tag) {
- pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
- return;
- }
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psataPayload->param);
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
- return;
- }
+ t = ccb->task;
+ pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
@@ -2335,10 +2306,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
ts = &t->task_status;
- if (!ts) {
- pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
- return;
- }
if (status)
pm8001_dbg(pm8001_ha, IOERR,
@@ -2393,11 +2360,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
- (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
- /* set new bit for abort_all */
- pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
- /* clear bit for read log */
- pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+ (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
pm8001_send_abort_all(pm8001_ha, pm8001_dev);
/* Free the tag */
pm8001_tag_free(pm8001_ha, tag);
@@ -2419,7 +2382,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
len = sizeof(struct pio_setup_fis);
pm8001_dbg(pm8001_ha, IO,
"PIO read len = %d\n", len);
- } else if (t->ata_task.use_ncq) {
+ } else if (t->ata_task.use_ncq &&
+ t->data_dir != DMA_NONE) {
len = sizeof(struct set_dev_bits_fis);
pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
len);
@@ -2513,7 +2477,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2529,7 +2493,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2551,7 +2515,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2622,7 +2586,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_NON_OPERATIONAL);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2642,7 +2606,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_DS_IN_ERROR);
ts->resp = SAS_TASK_UNDELIVERED;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return;
}
break;
@@ -2666,17 +2630,16 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
@@ -2693,16 +2656,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
-
- ccb = &pm8001_ha->ccb_info[tag];
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
- }
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2733,8 +2687,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2776,7 +2728,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2862,20 +2813,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- }
}
/*See the comments for mpi_ssp_completion */
@@ -3049,18 +2986,15 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
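[Editor's note] The free + mb() + task_done() tail that several completion paths used to open-code is collapsed into pm8001_ccb_task_free_done(). A plausible shape for that helper, assuming it simply wraps the sequence it replaces:

static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
					     struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;

	pm8001_ccb_task_free(pm8001_ha, ccb);
	smp_mb();		/* order the free before completing to libsas */
	task->task_done(task);
}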
@@ -3076,12 +3010,12 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
u32 device_id = le32_to_cpu(pPayload->device_id);
u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS;
u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;
- pm8001_dbg(pm8001_ha, MSG, "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n",
+
+ pm8001_dbg(pm8001_ha, MSG,
+ "Set device id = 0x%x state from 0x%x to 0x%x status = 0x%x!\n",
device_id, pds, nds, status);
complete(pm8001_dev->setds_completion);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3091,15 +3025,14 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 tag = le32_to_cpu(pPayload->tag);
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
+
complete(pm8001_ha->nvmd_completion);
pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
if ((dlen_status & NVMD_STAT) != 0) {
pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n",
dlen_status);
}
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
void
@@ -3124,9 +3057,7 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
/* We should free tag during failure also, the tag is not being
* freed by requesting path anywhere.
*/
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
return;
}
if (ir_tds_bn_dps_das_nvm & IPMode) {
@@ -3170,9 +3101,7 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
*/
complete(pm8001_ha->nvmd_completion);
pm8001_dbg(pm8001_ha, MSG, "Get nvmd data complete!\n");
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3209,7 +3138,7 @@ int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
*
* when HBA driver received the identify done event or initiate FIS received
* event(for SATA), it will invoke this function to notify the sas layer that
- * the sas toplogy has formed, please discover the the whole sas domain,
+ * the sas topology has formed, please discover the whole sas domain,
* while receive a broadcast(change) primitive just tell the sas
* layer to discover the changed domain rather than the whole domain.
*/
@@ -3220,15 +3149,6 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
if (!phy->phy_attached)
return;
- if (sas_phy->phy) {
- struct sas_phy *sphy = sas_phy->phy;
- sphy->negotiated_linkrate = sas_phy->linkrate;
- sphy->minimum_linkrate = phy->minimum_linkrate;
- sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sphy->maximum_linkrate = phy->maximum_linkrate;
- sphy->maximum_linkrate_hw = phy->maximum_linkrate;
- }
-
if (phy->phy_type & PORT_TYPE_SAS) {
struct sas_identify_frame *id;
id = (struct sas_identify_frame *)phy->frame_rcvd;
@@ -3252,26 +3172,22 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
switch (link_rate) {
case PHY_SPEED_120:
phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
break;
case PHY_SPEED_60:
phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
break;
case PHY_SPEED_30:
phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
case PHY_SPEED_15:
phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS;
- phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
break;
}
sas_phy->negotiated_linkrate = phy->sas_phy.linkrate;
- sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS;
+ sas_phy->maximum_linkrate_hw = phy->maximum_linkrate;
sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
- sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
- sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ sas_phy->maximum_linkrate = phy->maximum_linkrate;
+ sas_phy->minimum_linkrate = phy->minimum_linkrate;
}
/**
@@ -3319,17 +3235,14 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
struct hw_event_ack_req payload;
u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
- struct inbound_queue_table *circularQ;
-
memset((u8 *)&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
payload.tag = cpu_to_le32(1);
payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
((phyId & 0x0F) << 4) | (port_id & 0x0F));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload, sizeof(payload), 0);
}
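[Editor's note] Every submission path now passes an inbound queue index and lets the MPI layer resolve the queue, instead of computing &pm8001_ha->inbnd_q_tbl[...] at each call site. The assumed updated prototype:

/* assumed; the struct inbound_queue_table lookup moved inside the helper */
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, u32 q_index,
			 u32 opc, void *payload, size_t nb,
			 u32 responseQueue);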
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3583,9 +3496,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
}
complete(pm8001_dev->dcompletion);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, htag);
+ pm8001_ccb_free(pm8001_ha, ccb);
return 0;
}
@@ -3618,6 +3529,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
(struct fw_flash_Update_resp *)(piomb + 4);
u32 tag = le32_to_cpu(ppayload->tag);
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
+
status = le32_to_cpu(ppayload->status);
switch (status) {
case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
@@ -3655,9 +3567,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
break;
}
kfree(ccb->fw_control_context);
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
complete(pm8001_ha->nvmd_completion);
return 0;
}
@@ -3692,10 +3602,6 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
status = le32_to_cpu(pPayload->status);
tag = le32_to_cpu(pPayload->tag);
- if (!tag) {
- pm8001_dbg(pm8001_ha, FAIL, " TAG NULL. RETURNING !!!\n");
- return -1;
- }
scp = le32_to_cpu(pPayload->scp);
ccb = &pm8001_ha->ccb_info[tag];
@@ -3706,6 +3612,10 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL, " TASK NULL. RETURNING !!!\n");
return -1;
}
+
+ if (t->task_proto == SAS_PROTOCOL_INTERNAL_ABORT)
+ atomic_dec(&pm8001_dev->running_req);
+
ts = &t->task_status;
if (status != 0)
pm8001_dbg(pm8001_ha, FAIL, "task abort failed status 0x%x ,tag = 0x%x, scp= 0x%x\n",
@@ -3723,19 +3633,17 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();
if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) {
- pm8001_tag_free(pm8001_ha, tag);
sas_free_task(t);
- /* clear the flag */
- pm8001_dev->id &= 0xBFFFFFFF;
- } else
+ pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG;
+ } else {
t->task_done(t);
+ }
return 0;
}
@@ -4170,7 +4078,6 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
u32 req_len, resp_len;
struct smp_req smp_cmd;
u32 opc;
- struct inbound_queue_table *circularQ;
memset(&smp_cmd, 0, sizeof(smp_cmd));
/*
@@ -4196,7 +4103,6 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
}
opc = OPC_INB_SMP_REQUEST;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
@@ -4207,8 +4113,8 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
smp_cmd.long_smp_req.long_resp_size =
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &smp_cmd, sizeof(smp_cmd), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc,
+ &smp_cmd, sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
@@ -4236,9 +4142,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
- int ret;
u64 phys_addr;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SSPINIIOSTART;
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
@@ -4254,7 +4158,6 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
@@ -4275,9 +4178,9 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd,
- sizeof(ssp_cmd), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &ssp_cmd,
+ sizeof(ssp_cmd), 0);
}
static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
@@ -4287,33 +4190,31 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
u32 tag = ccb->ccb_tag;
- int ret;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr;
u32 ATAP = 0x0;
u32 dir;
- struct inbound_queue_table *circularQ;
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- if (task->data_dir == DMA_NONE) {
+
+ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
- if (task->ata_task.dma_xfer) {
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
+ } else if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
- if (task->ata_task.use_ncq &&
- dev->sata_dev.class != ATA_DEV_ATAPI) {
- ATAP = 0x07; /* FPDMA */
- pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
- }
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
@@ -4363,7 +4264,6 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags &
SAS_TASK_STATE_ABORTED))) {
@@ -4373,20 +4273,18 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
task, ts->resp,
ts->stat);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free_done(pm8001_ha, task,
- ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
return 0;
}
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
- return ret;
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
}
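[Editor's note] The ATAP reordering above matters because the three classifications overlap. A simplified view of the new selection (the device_control_reg_update guard is omitted here): libsas derives ata_task.dma_xfer from the protocol's DMA class, which NCQ also carries, so FPDMA must be tested before plain DMA, and NCQ non-data commands must not fall into the non-data bucket.

if (task->ata_task.use_ncq && dev->sata_dev.class != ATA_DEV_ATAPI)
	ATAP = 0x07;	/* FPDMA (NCQ), even for NCQ non-data commands */
else if (task->data_dir == DMA_NONE)
	ATAP = 0x04;	/* non-data */
else if (task->ata_task.dma_xfer)
	ATAP = 0x06;	/* DMA */
else
	ATAP = 0x05;	/* PIO */

This pairs with the earlier mpi_sata_completion() hunk, where a set-device-bits FIS length is now only expected for NCQ commands that actually move data.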
/**
@@ -4398,11 +4296,9 @@ static int
pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTART;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
/*
@@ -4419,9 +4315,9 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
memcpy(payload.sas_identify.sas_addr,
pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/**
@@ -4433,17 +4329,15 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
u8 phy_id)
{
struct phy_stop_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTOP;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/*
@@ -4455,9 +4349,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
struct reg_dev_req payload;
u32 opc;
u32 stp_sspsmp_sata = 0x4;
- struct inbound_queue_table *circularQ;
u32 linkrate, phy_id;
- int rc, tag = 0xdeadbeef;
+ int rc;
struct pm8001_ccb_info *ccb;
u8 retryFlag = 0x1;
u16 firstBurstSize = 0;
@@ -4465,16 +4358,13 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return rc;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = tag;
- payload.tag = cpu_to_le32(tag);
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
if (flag == 1)
stp_sspsmp_sata = 0x02; /*direct attached sata */
else {
@@ -4501,8 +4391,12 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_ccb_free(pm8001_ha, ccb);
+
return rc;
}
@@ -4514,18 +4408,15 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
{
struct dereg_dev_req payload;
u32 opc = OPC_INB_DEREG_DEV_HANDLE;
- int ret;
- struct inbound_queue_table *circularQ;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(1);
payload.device_id = cpu_to_le32(device_id);
pm8001_dbg(pm8001_ha, MSG, "unregister device device_id = %d\n",
device_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
/**
@@ -4538,17 +4429,15 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 phyId, u32 phy_op)
{
struct local_phy_ctl_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+
memset(&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(1);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
static u32 pm8001_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
@@ -4583,38 +4472,43 @@ pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
}
static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
- u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag)
+ u32 dev_id, enum sas_internal_abort type, u32 task_tag, u32 cmd_tag)
{
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
- int ret;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&task_abort, 0, sizeof(task_abort));
- if (ABORT_SINGLE == (flag & ABORT_MASK)) {
+ if (type == SAS_INTERNAL_ABORT_SINGLE) {
task_abort.abort_all = 0;
task_abort.device_id = cpu_to_le32(dev_id);
task_abort.tag_to_abort = cpu_to_le32(task_tag);
- task_abort.tag = cpu_to_le32(cmd_tag);
- } else if (ABORT_ALL == (flag & ABORT_MASK)) {
+ } else if (type == SAS_INTERNAL_ABORT_DEV) {
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(dev_id);
- task_abort.tag = cpu_to_le32(cmd_tag);
+ } else {
+ pm8001_dbg(pm8001_ha, EH, "unknown type (%d)\n", type);
+ return -EIO;
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
- return ret;
+
+ task_abort.tag = cpu_to_le32(cmd_tag);
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
}
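[Editor's note] The u8 flag + ABORT_MASK encoding is replaced by the libsas internal-abort type. The assumed enum this switch keys on (include/scsi/libsas.h in this series):

enum sas_internal_abort {
	SAS_INTERNAL_ABORT_SINGLE = 0,	/* abort one outstanding tag */
	SAS_INTERNAL_ABORT_DEV = 1,	/* abort everything on the device */
};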
/*
* pm8001_chip_abort_task - SAS abort task when error or exception happened.
*/
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
+ struct pm8001_ccb_info *ccb)
{
- u32 opc, device_id;
+ struct sas_task *task = ccb->task;
+ struct sas_internal_abort_task *abort = &task->abort_task;
+ struct pm8001_device *pm8001_dev = ccb->device;
int rc = TMF_RESP_FUNC_FAILED;
+ u32 opc, device_id;
+
pm8001_dbg(pm8001_ha, EH, "cmd_tag = %x, abort task tag = 0x%x\n",
- cmd_tag, task_tag);
+ ccb->ccb_tag, abort->tag);
if (pm8001_dev->dev_type == SAS_END_DEVICE)
opc = OPC_INB_SSP_ABORT;
else if (pm8001_dev->dev_type == SAS_SATA_DEV)
@@ -4622,8 +4516,8 @@ int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
else
opc = OPC_INB_SMP_ABORT;/* SMP */
device_id = pm8001_dev->device_id;
- rc = send_task_abort(pm8001_ha, opc, device_id, flag,
- task_tag, cmd_tag);
+ rc = send_task_abort(pm8001_ha, opc, device_id, abort->type,
+ abort->tag, ccb->ccb_tag);
if (rc != TMF_RESP_FUNC_COMPLETE)
pm8001_dbg(pm8001_ha, EH, "rc= %d\n", rc);
return rc;
@@ -4636,28 +4530,25 @@ int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
* @tmf: task management function.
*/
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
u32 opc = OPC_INB_SSPINITMSTART;
- struct inbound_queue_table *circularQ;
struct ssp_ini_tm_start_req sspTMCmd;
- int ret;
memset(&sspTMCmd, 0, sizeof(sspTMCmd));
sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id);
- sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed);
+ sspTMCmd.relate_tag = cpu_to_le32((u32)tmf->tag_of_task_to_be_managed);
sspTMCmd.tmf = cpu_to_le32(tmf->tmf);
memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
if (pm8001_ha->chip_id != chip_8001)
- sspTMCmd.ds_ads_m = 0x08;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd,
- sizeof(sspTMCmd), 0);
- return ret;
+ sspTMCmd.ds_ads_m = cpu_to_le32(0x08);
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sspTMCmd,
+ sizeof(sspTMCmd), 0);
}
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
@@ -4666,9 +4557,7 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_GET_NVMD_DATA;
u32 nvmd_type;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
- struct inbound_queue_table *circularQ;
struct get_nvm_data_req nvmd_req;
struct fw_control_ex *fw_control_context;
struct pm8001_ioctl_payload *ioctl_payload = payload;
@@ -4679,17 +4568,16 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
return -ENOMEM;
fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
fw_control_context->len = ioctl_payload->rd_length;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&nvmd_req, 0, sizeof(nvmd_req));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return rc;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
ccb->fw_control_context = fw_control_context;
- nvmd_req.tag = cpu_to_le32(tag);
+
+ nvmd_req.tag = cpu_to_le32(ccb->ccb_tag);
switch (nvmd_type) {
case TWI_DEVICE: {
@@ -4746,11 +4634,12 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
- sizeof(nvmd_req), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req,
+ sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
return rc;
}
@@ -4761,9 +4650,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_SET_NVMD_DATA;
u32 nvmd_type;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
- struct inbound_queue_table *circularQ;
struct set_nvm_data_req nvmd_req;
struct fw_control_ex *fw_control_context;
struct pm8001_ioctl_payload *ioctl_payload = payload;
@@ -4772,20 +4659,20 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
if (!fw_control_context)
return -ENOMEM;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
&ioctl_payload->func_specific,
ioctl_payload->wr_length);
memset(&nvmd_req, 0, sizeof(nvmd_req));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return -EBUSY;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
- ccb->ccb_tag = tag;
- nvmd_req.tag = cpu_to_le32(tag);
+
+ nvmd_req.tag = cpu_to_le32(ccb->ccb_tag);
switch (nvmd_type) {
case TWI_DEVICE: {
u32 twi_addr, twi_page_size;
@@ -4831,11 +4718,12 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
default:
break;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req,
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &nvmd_req,
sizeof(nvmd_req), 0);
if (rc) {
kfree(fw_control_context);
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
return rc;
}
@@ -4852,12 +4740,9 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
{
struct fw_flash_Update_req payload;
struct fw_flash_updata_info *info;
- struct inbound_queue_table *circularQ;
- int ret;
u32 opc = OPC_INB_FW_FLASH_UPDATE;
memset(&payload, 0, sizeof(struct fw_flash_Update_req));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
info = fw_flash_updata_info;
payload.tag = cpu_to_le32(tag);
payload.cur_image_len = cpu_to_le32(info->cur_image_len);
@@ -4868,9 +4753,9 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
payload.sgl_addr_hi =
cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
}
int
@@ -4881,7 +4766,6 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
struct fw_control_info *fw_control;
struct fw_control_ex *fw_control_context;
int rc;
- u32 tag;
struct pm8001_ccb_info *ccb;
void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
@@ -4905,16 +4789,21 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
fw_control_context->virtAddr = buffer;
fw_control_context->phys_addr = phys_addr;
fw_control_context->len = fw_control->len;
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc) {
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb) {
kfree(fw_control_context);
- return -EBUSY;
+ return -SAS_QUEUE_FULL;
}
- ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
- ccb->ccb_tag = tag;
+
rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info,
- tag);
+ ccb->ccb_tag);
+ if (rc) {
+ kfree(fw_control_context);
+ pm8001_ccb_free(pm8001_ha, ccb);
+ }
+
return rc;
}
@@ -5001,59 +4890,59 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev, u32 state)
{
struct set_dev_state_req payload;
- struct inbound_queue_table *circularQ;
struct pm8001_ccb_info *ccb;
int rc;
- u32 tag;
u32 opc = OPC_INB_SET_DEVICE_STATE;
+
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return -1;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
- ccb->device = pm8001_dev;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = cpu_to_le32(tag);
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
payload.device_id = cpu_to_le32(pm8001_dev->device_id);
payload.nds = cpu_to_le32(state);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
- return rc;
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_ccb_free(pm8001_ha, ccb);
+
+ return rc;
}
static int
pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
{
struct sas_re_initialization_req payload;
- struct inbound_queue_table *circularQ;
struct pm8001_ccb_info *ccb;
int rc;
- u32 tag;
u32 opc = OPC_INB_SAS_RE_INITIALIZE;
+
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return -ENOMEM;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->ccb_tag = tag;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
- payload.tag = cpu_to_le32(tag);
+
+ ccb = pm8001_ccb_alloc(pm8001_ha, NULL, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
payload.SSAHOLT = cpu_to_le32(0xd << 25);
payload.sata_hol_tmo = cpu_to_le32(80);
payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
if (rc)
- pm8001_tag_free(pm8001_ha, tag);
- return rc;
+ pm8001_ccb_free(pm8001_ha, ccb);
+ return rc;
}
const struct pm8001_dispatch pm8001_8001_dispatch = {
.name = "pmc8001",
.chip_init = pm8001_chip_init,
+ .chip_post_init = pm8001_chip_post_init,
.chip_soft_rst = pm8001_chip_soft_rst,
.chip_rst = pm8001_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index d1f3aa93325b..961d0465b923 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -434,11 +434,6 @@ struct task_abort_req {
u32 reserved[11];
} __attribute__((packed, aligned(4)));
-/* These flags used for SSP SMP & SATA Abort */
-#define ABORT_MASK 0x3
-#define ABORT_SINGLE 0x0
-#define ABORT_ALL 0x1
-
/**
* brief the data structure of SSP SATA SMP Abort Response
* use to describe SSP SMP & SATA Abort Response ( 64 bytes)
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index fbfeb0b046dd..7a7d63aa90e2 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(link_rate, "Enable link rate.\n"
" 8: Link rate 12.0G\n");
static struct scsi_transport_template *pm8001_stt;
-static int pm8001_init_ccb_tag(struct pm8001_hba_info *, struct Scsi_Host *, struct pci_dev *);
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *);
/*
* chip info structure to identify chip key functionality as
@@ -81,12 +81,25 @@ LIST_HEAD(hba_list);
struct workqueue_struct *pm8001_wq;
+static void pm8001_map_queues(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+ if (pm8001_ha->number_of_intr > 1)
+ blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+
+ return blk_mq_map_queues(qmap);
+}
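[Editor's note] Vector 0 stays reserved for housekeeping, so I/O hw queue i is expected to map onto MSI-X vector i + 1 (the offset 1 passed to blk_mq_pci_map_queues()); pm8001_pci_probe() below sets shost->nr_hw_queues = number_of_intr - 1 to match. Assumed helper prototypes at this tree state, where the map_queues callback itself is void:

void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap,
			   struct pci_dev *pdev, int offset);
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);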
+
/*
* The main structure which LLDD must register for scsi core.
*/
static struct scsi_host_template pm8001_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
+ .proc_name = DRV_NAME,
.queuecommand = sas_queuecommand,
.dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
@@ -109,6 +122,8 @@ static struct scsi_host_template pm8001_sht = {
#endif
.shost_groups = pm8001_host_groups,
.track_queue_depth = 1,
+ .cmd_per_lun = 32,
+ .map_queues = pm8001_map_queues,
};
/*
@@ -122,13 +137,14 @@ static struct sas_domain_function_template pm8001_transport_ops = {
.lldd_control_phy = pm8001_phy_control,
.lldd_abort_task = pm8001_abort_task,
- .lldd_abort_task_set = pm8001_abort_task_set,
- .lldd_clear_aca = pm8001_clear_aca,
+ .lldd_abort_task_set = sas_abort_task_set,
.lldd_clear_task_set = pm8001_clear_task_set,
.lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
.lldd_lu_reset = pm8001_lu_reset,
.lldd_query_task = pm8001_query_task,
.lldd_port_formed = pm8001_port_formed,
+ .lldd_tmf_exec_complete = pm8001_setds_completion,
+ .lldd_tmf_aborted = pm8001_tmf_aborted,
};
/**
@@ -142,6 +158,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->phy_state = PHY_LINK_DISABLE;
phy->pm8001_ha = pm8001_ha;
+ phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
@@ -179,7 +197,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
}
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
flush_workqueue(pm8001_wq);
- kfree(pm8001_ha->tags);
+ bitmap_free(pm8001_ha->tags);
kfree(pm8001_ha);
}
@@ -604,12 +622,8 @@ static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
shost->transportt = pm8001_stt;
shost->max_id = PM8001_MAX_DEVICES;
- shost->max_lun = 8;
- shost->max_channel = 0;
shost->unique_id = pm8001_id;
shost->max_cmd_len = 16;
- shost->can_queue = PM8001_CAN_QUEUE;
- shost->cmd_per_lun = 32;
return 0;
exit_free1:
kfree(arr_port);
@@ -930,31 +944,35 @@ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha)
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
{
- u32 number_of_intr;
- int rc, cpu_online_count;
unsigned int allocated_irq_vectors;
+ int rc;
/* SPCv controllers supports 64 msi-x */
if (pm8001_ha->chip_id == chip_8001) {
- number_of_intr = 1;
+ rc = pci_alloc_irq_vectors(pm8001_ha->pdev, 1, 1,
+ PCI_IRQ_MSIX);
} else {
- number_of_intr = PM8001_MAX_MSIX_VEC;
+ /*
+ * Queue index #0 is used always for housekeeping, so don't
+ * include in the affinity spreading.
+ */
+ struct irq_affinity desc = {
+ .pre_vectors = 1,
+ };
+ rc = pci_alloc_irq_vectors_affinity(
+ pm8001_ha->pdev, 2, PM8001_MAX_MSIX_VEC,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
}
- cpu_online_count = num_online_cpus();
- number_of_intr = min_t(int, cpu_online_count, number_of_intr);
- rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
- number_of_intr, PCI_IRQ_MSIX);
allocated_irq_vectors = rc;
if (rc < 0)
return rc;
/* Assigns the number of interrupts */
- number_of_intr = min_t(int, allocated_irq_vectors, number_of_intr);
- pm8001_ha->number_of_intr = number_of_intr;
+ pm8001_ha->number_of_intr = allocated_irq_vectors;
/* Maximum queue number updating in HBA structure */
- pm8001_ha->max_q_num = number_of_intr;
+ pm8001_ha->max_q_num = allocated_irq_vectors;
pm8001_dbg(pm8001_ha, INIT,
"pci_alloc_irq_vectors request ret:%d no of intr %d\n",
@@ -1121,10 +1139,23 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_ha_free;
}
- rc = pm8001_init_ccb_tag(pm8001_ha, shost, pdev);
+ rc = pm8001_init_ccb_tag(pm8001_ha);
if (rc)
goto err_out_enable;
+
+ PM8001_CHIP_DISP->chip_post_init(pm8001_ha);
+
+ if (pm8001_ha->number_of_intr > 1) {
+ shost->nr_hw_queues = pm8001_ha->number_of_intr - 1;
+ /*
+ * For now, ensure we're not sent too many commands by setting
+ * host_tagset. This is also required if we start using request
+ * tag.
+ */
+ shost->host_tagset = 1;
+ }
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
@@ -1174,16 +1205,14 @@ err_out_enable:
/**
* pm8001_init_ccb_tag - allocate memory to CCB and tag.
* @pm8001_ha: our hba card information.
- * @shost: scsi host which has been allocated outside.
- * @pdev: pci device.
*/
-static int
-pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
- struct pci_dev *pdev)
+static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha)
{
- int i = 0;
+ struct Scsi_Host *shost = pm8001_ha->shost;
+ struct device *dev = pm8001_ha->dev;
u32 max_out_io, ccb_count;
u32 can_queue;
+ int i;
max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io;
ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io);
@@ -1192,7 +1221,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
can_queue = ccb_count - PM8001_RESERVE_SLOT;
shost->can_queue = can_queue;
- pm8001_ha->tags = kzalloc(ccb_count, GFP_KERNEL);
+ pm8001_ha->tags = bitmap_zalloc(ccb_count, GFP_KERNEL);
if (!pm8001_ha->tags)
goto err_out;
@@ -1206,7 +1235,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out_noccb;
}
for (i = 0; i < ccb_count; i++) {
- pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(&pdev->dev,
+ pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(dev,
sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
&pm8001_ha->ccb_info[i].ccb_dma_handle,
GFP_KERNEL);
@@ -1216,10 +1245,11 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out;
}
pm8001_ha->ccb_info[i].task = NULL;
- pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
+ pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG;
pm8001_ha->ccb_info[i].device = NULL;
++pm8001_ha->tags_num;
}
+
return 0;
err_out_noccb:
@@ -1335,13 +1365,13 @@ static int __maybe_unused pm8001_pci_resume(struct device *dev)
struct pm8001_hba_info *pm8001_ha;
int rc;
u8 i = 0, j;
- u32 device_state;
DECLARE_COMPLETION_ONSTACK(completion);
+
pm8001_ha = sha->lldd_ha;
- device_state = pdev->current_state;
- pm8001_info(pm8001_ha, "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n",
- pdev, pm8001_ha->name, device_state);
+ pm8001_info(pm8001_ha,
+ "pdev=0x%p, slot=%s, resuming from previous operating state [D%d]\n",
+ pdev, pm8001_ha->name, pdev->current_state);
rc = pci_go_44(pdev);
if (rc)
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 83e73009db5c..8e3f2f9ddaac 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
+#include "pm80xx_tracepoints.h"
/**
* pm8001_find_tag - from sas task to find out tag that belongs to this task
@@ -65,7 +66,11 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag)
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
void *bitmap = pm8001_ha->tags;
- clear_bit(tag, bitmap);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
+ __clear_bit(tag, bitmap);
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}
/**
@@ -73,11 +78,11 @@ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
* @pm8001_ha: our hba struct
* @tag_out: the found empty tag .
*/
-inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
+int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
- unsigned int tag;
void *bitmap = pm8001_ha->tags;
unsigned long flags;
+ unsigned int tag;
spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
@@ -85,7 +90,7 @@ inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
return -SAS_QUEUE_FULL;
}
- set_bit(tag, bitmap);
+ __set_bit(tag, bitmap);
spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
*tag_out = tag;
return 0;
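[Editor's note] With both the find_first_zero_bit() search and the bit update inside bitmap_lock, the non-atomic __set_bit()/__clear_bit() variants suffice and two CPUs can no longer claim the same tag; the bitmap itself is now sized in bits via bitmap_zalloc() and released with bitmap_free() in pm8001_free(). A usage sketch of the pair:

u32 tag;

if (pm8001_tag_alloc(pm8001_ha, &tag))
	return -SAS_QUEUE_FULL;		/* bitmap exhausted */

/* 'tag' owns &pm8001_ha->ccb_info[tag] until it is freed */
pm8001_tag_free(pm8001_ha, tag);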
@@ -233,14 +238,13 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
}
{
struct sas_phy *phy = sas_phy->phy;
- uint32_t *qp = (uint32_t *)(((char *)
- pm8001_ha->io_mem[2].memvirtaddr)
- + 0x1034 + (0x4000 * (phy_id & 3)));
-
- phy->invalid_dword_count = qp[0];
- phy->running_disparity_error_count = qp[1];
- phy->loss_of_dword_sync_count = qp[3];
- phy->phy_reset_problem_count = qp[4];
+ u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
+ + 0x1034 + (0x4000 * (phy_id & 3));
+
+ phy->invalid_dword_count = readl(qp);
+ phy->running_disparity_error_count = readl(&qp[1]);
+ phy->loss_of_dword_sync_count = readl(&qp[3]);
+ phy->phy_reset_problem_count = readl(&qp[4]);
}
if (pm8001_ha->chip_id == chip_8001)
pm8001_bar4_shift(pm8001_ha, 0);
@@ -304,16 +308,12 @@ static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
struct ata_queued_cmd *qc = task->uldd_task;
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ ||
- qc->tf.command == ATA_CMD_FPDMA_RECV ||
- qc->tf.command == ATA_CMD_FPDMA_SEND ||
- qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
- *tag = qc->tag;
- return 1;
- }
+
+ if (qc && ata_is_ncq(qc->tf.protocol)) {
+ *tag = qc->tag;
+ return 1;
}
+
return 0;
}
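[Editor's note] The open-coded list of five FPDMA/NCQ opcodes is replaced by the libata protocol check, which also covers any NCQ command added later. Its definition, for reference (include/linux/libata.h):

static inline bool ata_is_ncq(u8 prot)
{
	return prot & ATA_PROT_FLAG_NCQ;
}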
@@ -329,13 +329,25 @@ static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
}
/**
+ * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
+ * for internal abort task
+ * @pm8001_ha: our hba card information
+ * @ccb: the ccb which attached to sata task
+ */
+static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
+}
+
+/**
* pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
* @pm8001_ha: our hba card information
* @ccb: the ccb which attached to TM
* @tmf: the task management IU
*/
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}
@@ -370,169 +382,153 @@ static int sas_find_local_port_id(struct domain_device *dev)
#define DEV_IS_GONE(pm8001_dev) \
((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
+
+
+static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+ enum sas_protocol task_proto = task->task_proto;
+ struct sas_tmf_task *tmf = task->tmf;
+ int is_tmf = !!tmf;
+
+ switch (task_proto) {
+ case SAS_PROTOCOL_SMP:
+ return pm8001_task_prep_smp(pm8001_ha, ccb);
+ case SAS_PROTOCOL_SSP:
+ if (is_tmf)
+ return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
+ return pm8001_task_prep_ssp(pm8001_ha, ccb);
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ return pm8001_task_prep_ata(pm8001_ha, ccb);
+ case SAS_PROTOCOL_INTERNAL_ABORT:
+ return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
+ default:
+ dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
+ task_proto);
+ }
+
+ return -EINVAL;
+}
+
/**
- * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
+ * pm8001_queue_command - registered for upper layer use; all IO commands
+ * sent to the HBA come through this interface.
* @task: the task to be executed.
- * @gfp_flags: gfp_flags.
- * @is_tmf: if it is task management task.
- * @tmf: the task management IU
+ * @gfp_flags: gfp_flags
*/
-static int pm8001_task_exec(struct sas_task *task,
- gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
+int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
+ struct task_status_struct *ts = &task->task_status;
+ enum sas_protocol task_proto = task->task_proto;
struct domain_device *dev = task->dev;
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ bool internal_abort = sas_is_internal_abort(task);
struct pm8001_hba_info *pm8001_ha;
- struct pm8001_device *pm8001_dev;
struct pm8001_port *port = NULL;
- struct sas_task *t = task;
struct pm8001_ccb_info *ccb;
- u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
- unsigned long flags = 0;
- enum sas_protocol task_proto = t->task_proto;
+ unsigned long flags;
+ u32 n_elem = 0;
+ int rc = 0;
- if (!dev->port) {
- struct task_status_struct *tsm = &t->task_status;
- tsm->resp = SAS_TASK_UNDELIVERED;
- tsm->stat = SAS_PHY_DOWN;
+ if (!internal_abort && !dev->port) {
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
if (dev->dev_type != SAS_SATA_DEV)
- t->task_done(t);
+ task->task_done(task);
return 0;
}
- pm8001_ha = pm8001_find_ha_by_dev(task->dev);
- if (pm8001_ha->controller_fatal_error) {
- struct task_status_struct *ts = &t->task_status;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ if (pm8001_ha->controller_fatal_error) {
ts->resp = SAS_TASK_UNDELIVERED;
- t->task_done(t);
+ task->task_done(task);
return 0;
}
+
pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
+
spin_lock_irqsave(&pm8001_ha->lock, flags);
- do {
- dev = t->dev;
- pm8001_dev = dev->lldd_dev;
- port = &pm8001_ha->port[sas_find_local_port_id(dev)];
- if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
- if (sas_protocol_ata(task_proto)) {
- struct task_status_struct *ts = &t->task_status;
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- t->task_done(t);
- spin_lock_irqsave(&pm8001_ha->lock, flags);
- continue;
- } else {
- struct task_status_struct *ts = &t->task_status;
- ts->resp = SAS_TASK_UNDELIVERED;
- ts->stat = SAS_PHY_DOWN;
- t->task_done(t);
- continue;
- }
- }
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- goto err_out;
- ccb = &pm8001_ha->ccb_info[tag];
-
- if (!sas_protocol_ata(task_proto)) {
- if (t->num_scatter) {
- n_elem = dma_map_sg(pm8001_ha->dev,
- t->scatter,
- t->num_scatter,
- t->data_dir);
- if (!n_elem) {
- rc = -ENOMEM;
- goto err_out_tag;
- }
- }
+ pm8001_dev = dev->lldd_dev;
+ port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+
+ if (!internal_abort &&
+ (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_PHY_DOWN;
+ if (sas_protocol_ata(task_proto)) {
+ spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+ task->task_done(task);
+ spin_lock_irqsave(&pm8001_ha->lock, flags);
} else {
- n_elem = t->num_scatter;
+ task->task_done(task);
}
+ rc = -ENODEV;
+ goto err_out;
+ }
- t->lldd_task = ccb;
- ccb->n_elem = n_elem;
- ccb->ccb_tag = tag;
- ccb->task = t;
- ccb->device = pm8001_dev;
- switch (task_proto) {
- case SAS_PROTOCOL_SMP:
- atomic_inc(&pm8001_dev->running_req);
- rc = pm8001_task_prep_smp(pm8001_ha, ccb);
- break;
- case SAS_PROTOCOL_SSP:
- atomic_inc(&pm8001_dev->running_req);
- if (is_tmf)
- rc = pm8001_task_prep_ssp_tm(pm8001_ha,
- ccb, tmf);
- else
- rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
- break;
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- atomic_inc(&pm8001_dev->running_req);
- rc = pm8001_task_prep_ata(pm8001_ha, ccb);
- break;
- default:
- dev_printk(KERN_ERR, pm8001_ha->dev,
- "unknown sas_task proto: 0x%x\n", task_proto);
- rc = -EINVAL;
- break;
- }
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
+ if (!ccb) {
+ rc = -SAS_QUEUE_FULL;
+ goto err_out;
+ }
- if (rc) {
- pm8001_dbg(pm8001_ha, IO, "rc is %x\n", rc);
- atomic_dec(&pm8001_dev->running_req);
- goto err_out_tag;
+ if (!sas_protocol_ata(task_proto)) {
+ if (task->num_scatter) {
+ n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
+ if (!n_elem) {
+ rc = -ENOMEM;
+ goto err_out_ccb;
+ }
}
- /* TODO: select normal or high priority */
- spin_lock(&t->task_state_lock);
- t->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock(&t->task_state_lock);
- } while (0);
- rc = 0;
- goto out_done;
-
-err_out_tag:
- pm8001_tag_free(pm8001_ha, tag);
+ } else {
+ n_elem = task->num_scatter;
+ }
+
+ task->lldd_task = ccb;
+ ccb->n_elem = n_elem;
+
+ atomic_inc(&pm8001_dev->running_req);
+
+ rc = pm8001_deliver_command(pm8001_ha, ccb);
+ if (rc) {
+ atomic_dec(&pm8001_dev->running_req);
+ if (!sas_protocol_ata(task_proto) && n_elem)
+ dma_unmap_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
+err_out_ccb:
+ pm8001_ccb_free(pm8001_ha, ccb);
+
err_out:
- dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
- if (!sas_protocol_ata(task_proto))
- if (n_elem)
- dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
- t->data_dir);
-out_done:
+ pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
+ }
+
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- return rc;
-}
-/**
- * pm8001_queue_command - register for upper layer used, all IO commands sent
- * to HBA are from this interface.
- * @task: the task to be execute.
- * @gfp_flags: gfp_flags
- */
-int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
-{
- return pm8001_task_exec(task, gfp_flags, 0, NULL);
+ return rc;
}
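[Editor's note] pm8001_queue_command() now also carries libsas internal aborts, which skip the port/device-gone checks above. The assumed helper it keys off (include/scsi/libsas.h):

static inline bool sas_is_internal_abort(struct sas_task *task)
{
	return task->task_proto == SAS_PROTOCOL_INTERNAL_ABORT;
}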
/**
* pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
* @pm8001_ha: our hba card information
- * @ccb: the ccb which attached to ssp task
- * @task: the task to be free.
- * @ccb_idx: ccb index.
+ * @ccb: the ccb attached to the ssp task, to be freed
*/
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
+ struct pm8001_ccb_info *ccb)
{
- if (!ccb->task)
+ struct sas_task *task = ccb->task;
+ struct ata_queued_cmd *qc;
+ struct pm8001_device *pm8001_dev;
+
+ if (!task)
return;
- if (!sas_protocol_ata(task->task_proto))
- if (ccb->n_elem)
- dma_unmap_sg(pm8001_ha->dev, task->scatter,
- task->num_scatter, task->data_dir);
+
+ if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
+ dma_unmap_sg(pm8001_ha->dev, task->scatter,
+ task->num_scatter, task->data_dir);
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
@@ -549,11 +545,20 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
/* do nothing */
break;
}
+
+ if (sas_protocol_ata(task->task_proto)) {
+ /* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
+ qc = task->uldd_task;
+ pm8001_dev = ccb->device;
+ trace_pm80xx_request_complete(pm8001_ha->id,
+ pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
+ ccb->ccb_tag, 0 /* ctlr_opcode not known */,
+ qc ? qc->tf.command : 0, // ata opcode
+ pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
+ }
+
task->lldd_task = NULL;
- ccb->task = NULL;
- ccb->ccb_tag = 0xFFFFFFFF;
- ccb->open_retry = 0;
- pm8001_tag_free(pm8001_ha, ccb_idx);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
/**
@@ -688,178 +693,7 @@ void pm8001_task_done(struct sas_task *task)
complete(&task->slow_task->completion);
}
-static void pm8001_tmf_timedout(struct timer_list *t)
-{
- struct sas_task_slow *slow = from_timer(slow, t, timer);
- struct sas_task *task = slow->task;
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->slow_task->completion);
- }
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-}
-
#define PM8001_TASK_TIMEOUT 20
-/**
- * pm8001_exec_internal_tmf_task - execute some task management commands.
- * @dev: the wanted device.
- * @tmf: which task management wanted to be take.
- * @para_len: para_len.
- * @parameter: ssp task parameter.
- *
- * when errors or exception happened, we may want to do something, for example
- * abort the issued task which result in this exception, it is done by calling
- * this function, note it is also with the task execute interface.
- */
-static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
- void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
-{
- int res, retry;
- struct sas_task *task = NULL;
- struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
- struct pm8001_device *pm8001_dev = dev->lldd_dev;
- DECLARE_COMPLETION_ONSTACK(completion_setstate);
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
- memcpy(&task->ssp_task, parameter, para_len);
- task->task_done = pm8001_task_done;
- task->slow_task->timer.function = pm8001_tmf_timedout;
- task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
- add_timer(&task->slow_task->timer);
-
- res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
- goto ex_err;
- }
- wait_for_completion(&task->slow_task->completion);
- if (pm8001_ha->chip_id != chip_8001) {
- pm8001_dev->setds_completion = &completion_setstate;
- PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
- pm8001_dev, DS_OPERATIONAL);
- wait_for_completion(&completion_setstate);
- }
- res = -TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
- pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
- tmf->tmf);
- goto ex_err;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_UNDERRUN) {
- /* no error, but return the number of bytes of
- * underrun */
- res = task->task_status.residual;
- break;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_DATA_OVERRUN) {
- pm8001_dbg(pm8001_ha, FAIL, "Blocked task error.\n");
- res = -EMSGSIZE;
- break;
- } else {
- pm8001_dbg(pm8001_ha, EH,
- " Task to dev %016llx response:0x%x status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- sas_free_task(task);
- task = NULL;
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
-
-static int
-pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
- u32 task_tag)
-{
- int res, retry;
- u32 ccb_tag;
- struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
-
- for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_slow_task(GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- task->dev = dev;
- task->task_proto = dev->tproto;
- task->task_done = pm8001_task_done;
- task->slow_task->timer.function = pm8001_tmf_timedout;
- task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
- add_timer(&task->slow_task->timer);
-
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res)
- goto ex_err;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
- ccb->n_elem = 0;
-
- res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
- pm8001_dev, flag, task_tag, ccb_tag);
-
- if (res) {
- del_timer(&task->slow_task->timer);
- pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
- goto ex_err;
- }
- wait_for_completion(&task->slow_task->completion);
- res = TMF_RESP_FUNC_FAILED;
- /* Even TMF timed out, return direct. */
- if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
- pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
- goto ex_err;
- }
-
- if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAS_SAM_STAT_GOOD) {
- res = TMF_RESP_FUNC_COMPLETE;
- break;
-
- } else {
- pm8001_dbg(pm8001_ha, EH,
- " Task to dev %016llx response: 0x%x status 0x%x\n",
- SAS_ADDR(dev->sas_addr),
- task->task_status.resp,
- task->task_status.stat);
- sas_free_task(task);
- task = NULL;
- }
- }
-ex_err:
- BUG_ON(retry == 3 && task != NULL);
- sas_free_task(task);
- return res;
-}
/**
* pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
@@ -880,8 +714,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
pm8001_dev->device_id, pm8001_dev->dev_type);
if (atomic_read(&pm8001_dev->running_req)) {
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
- pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
while (atomic_read(&pm8001_dev->running_req))
msleep(20);
spin_lock_irqsave(&pm8001_ha->lock, flags);
@@ -900,18 +733,6 @@ void pm8001_dev_gone(struct domain_device *dev)
pm8001_dev_gone_notify(dev);
}
-static int pm8001_issue_ssp_tmf(struct domain_device *dev,
- u8 *lun, struct pm8001_tmf_task *tmf)
-{
- struct sas_ssp_task ssp_task;
- if (!(dev->tproto & SAS_PROTOCOL_SSP))
- return TMF_RESP_FUNC_ESUPP;
-
- memcpy((u8 *)&ssp_task.LUN, lun, 8);
- return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
- tmf);
-}
-
/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
struct pm8001_hba_info *pm8001_ha,
@@ -931,9 +752,11 @@ void pm8001_open_reject_retry(
struct task_status_struct *ts;
struct pm8001_device *pm8001_dev;
unsigned long flags1;
- u32 tag;
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
+ if (ccb->ccb_tag == PM8001_INVALID_TAG)
+ continue;
+
pm8001_dev = ccb->device;
if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
continue;
@@ -945,9 +768,6 @@ void pm8001_open_reject_retry(
continue;
} else if (pm8001_dev != device_to_close)
continue;
- tag = ccb->ccb_tag;
- if (!tag || (tag == 0xFFFFFFFF))
- continue;
task = ccb->task;
if (!task || !task->task_done)
continue;
@@ -962,17 +782,16 @@ void pm8001_open_reject_retry(
atomic_dec(&pm8001_dev->running_req);
spin_lock_irqsave(&task->task_state_lock, flags1);
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags
& SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags1);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
task->task_done(task);
@@ -1018,8 +837,7 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
goto out;
}
msleep(2000);
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ rc = sas_execute_internal_abort_dev(dev, 0, NULL);
if (rc) {
pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
"with rc %d\n", pm8001_dev->device_id, rc);
@@ -1064,8 +882,7 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
goto out;
}
/* send internal ssp/sata/smp abort command to FW */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
msleep(100);
/* deregister the target device */
@@ -1080,8 +897,7 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
wait_for_completion(&completion_setstate);
} else {
/* send internal ssp/sata/smp abort command to FW */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
msleep(100);
/* deregister the target device */
@@ -1103,14 +919,12 @@ out:
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
int rc = TMF_RESP_FUNC_FAILED;
- struct pm8001_tmf_task tmf_task;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
DECLARE_COMPLETION_ONSTACK(completion_setstate);
if (dev_is_sata(dev)) {
struct sas_phy *phy = sas_get_local_phy(dev);
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- dev, 1, 0);
+ sas_execute_internal_abort_dev(dev, 0, NULL);
rc = sas_phy_reset(phy, 1);
sas_put_local_phy(phy);
pm8001_dev->setds_completion = &completion_setstate;
@@ -1118,8 +932,7 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion_setstate);
} else {
- tmf_task.tmf = TMF_LU_RESET;
- rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ rc = sas_lu_reset(dev, lun);
}
/* If failed, fall-through I_T_Nexus reset */
pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
@@ -1131,8 +944,6 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
int pm8001_query_task(struct sas_task *task)
{
u32 tag = 0xdeadbeef;
- struct scsi_lun lun;
- struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
if (unlikely(!task || !task->lldd_task || !task->dev))
return rc;
@@ -1143,17 +954,14 @@ int pm8001_query_task(struct sas_task *task)
struct pm8001_hba_info *pm8001_ha =
pm8001_find_ha_by_dev(dev);
- int_to_scsilun(cmnd->device->lun, &lun);
rc = pm8001_find_tag(task, &tag);
if (rc == 0) {
rc = TMF_RESP_FUNC_FAILED;
return rc;
}
pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
- tmf_task.tmf = TMF_QUERY_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
- rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+ rc = sas_query_task(task, tag);
switch (rc) {
/* The task is still in Lun, release it then */
case TMF_RESP_FUNC_SUCC:
@@ -1179,11 +987,9 @@ int pm8001_abort_task(struct sas_task *task)
u32 tag;
struct domain_device *dev ;
struct pm8001_hba_info *pm8001_ha;
- struct scsi_lun lun;
struct pm8001_device *pm8001_dev;
- struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED, ret;
- u32 phy_id;
+ u32 phy_id, port_id;
struct sas_task_slow slow_task;
if (unlikely(!task || !task->lldd_task || !task->dev))
@@ -1217,19 +1023,15 @@ int pm8001_abort_task(struct sas_task *task)
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->task_proto & SAS_PROTOCOL_SSP) {
- struct scsi_cmnd *cmnd = task->uldd_task;
- int_to_scsilun(cmnd->device->lun, &lun);
- tmf_task.tmf = TMF_ABORT_TASK;
- tmf_task.tag_of_task_to_be_managed = tag;
- rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
- pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = sas_abort_task(task, tag);
+ sas_execute_internal_abort_single(dev, tag, 0, NULL);
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (pm8001_ha->chip_id == chip_8006) {
DECLARE_COMPLETION_ONSTACK(completion_reset);
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+ port_id = phy->port->port_id;
/* 1. Set Device state as Recovery */
pm8001_dev->setds_completion = &completion;
@@ -1281,6 +1083,10 @@ int pm8001_abort_task(struct sas_task *task)
PORT_RESET_TMO);
if (phy->port_reset_status == PORT_RESET_TMO) {
pm8001_dev_gone_notify(dev);
+ PM8001_CHIP_DISP->hw_event_ack_req(
+ pm8001_ha, 0,
+ 0x07, /* HW_EVENT_PHY_DOWN ack */
+ port_id, phy_id, 0, 0);
goto out;
}
}
@@ -1291,8 +1097,7 @@ int pm8001_abort_task(struct sas_task *task)
* is removed from the ccb. on success the caller is
* going to free the task.
*/
- ret = pm8001_exec_internal_task_abort(pm8001_ha,
- pm8001_dev, pm8001_dev->sas_device, 1, tag);
+ ret = sas_execute_internal_abort_dev(dev, 0, NULL);
if (ret)
goto out;
ret = wait_for_completion_timeout(
@@ -1308,14 +1113,12 @@ int pm8001_abort_task(struct sas_task *task)
pm8001_dev, DS_OPERATIONAL);
wait_for_completion(&completion);
} else {
- rc = pm8001_exec_internal_task_abort(pm8001_ha,
- pm8001_dev, pm8001_dev->sas_device, 0, tag);
+ ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
}
rc = TMF_RESP_FUNC_COMPLETE;
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);
}
out:
@@ -1328,32 +1131,14 @@ out:
return rc;
}
-int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
-{
- struct pm8001_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_ABORT_TASK_SET;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
-}
-
-int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
-{
- struct pm8001_tmf_task tmf_task;
-
- tmf_task.tmf = TMF_CLEAR_ACA;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
-}
-
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
- struct pm8001_tmf_task tmf_task;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
pm8001_dev->device_id);
- tmf_task.tmf = TMF_CLEAR_TASK_SET;
- return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
+ return sas_clear_task_set(dev, lun);
}
void pm8001_port_formed(struct asd_sas_phy *sas_phy)
@@ -1370,3 +1155,25 @@ void pm8001_port_formed(struct asd_sas_phy *sas_phy)
}
sas_port->lldd_port = port;
}
+
+/*
+ * Restore the device state to DS_OPERATIONAL and wait for the firmware
+ * acknowledgment (skipped on the original 8001 controller).
+ */
+void pm8001_setds_completion(struct domain_device *dev)
+{
+ struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+ DECLARE_COMPLETION_ONSTACK(completion_setstate);
+
+ if (pm8001_ha->chip_id != chip_8001) {
+ pm8001_dev->setds_completion = &completion_setstate;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, DS_OPERATIONAL);
+ wait_for_completion(&completion_setstate);
+ }
+}
+
+/*
+ * A TMF was aborted: detach the sas_task from its ccb so that the
+ * completion path does not touch the stale task.
+ */
+void pm8001_tmf_aborted(struct sas_task *task)
+{
+ struct pm8001_ccb_info *ccb = task->lldd_task;
+
+ if (ccb)
+ ccb->task = NULL;
+}
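For context, the libsas side of this change: the driver-private TMF execution removed above is now handled by libsas itself once the LLDD exposes the two new hooks. A minimal sketch of the wiring (illustrative only; the actual assignment lives in pm8001_init.c, and the sas_domain_function_template field names are assumed from the v5.18-era libsas TMF rework):

/* Sketch, not part of this hunk: libsas executes SSP TMFs and internal
 * aborts itself; pm8001 keeps only the chip-specific pieces
 * (device-state restore, aborted-TMF ccb cleanup).
 */
static struct sas_domain_function_template example_dft = {
	.lldd_abort_task	= pm8001_abort_task,
	.lldd_clear_task_set	= pm8001_clear_task_set,
	.lldd_lu_reset		= pm8001_lu_reset,
	.lldd_query_task	= pm8001_query_task,
	.lldd_tmf_exec_complete	= pm8001_setds_completion,
	.lldd_tmf_aborted	= pm8001_tmf_aborted,
};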
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 83eec16d021d..b08f52673889 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -55,6 +55,8 @@
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
@@ -71,7 +73,7 @@
#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
#define pm8001_info(HBA, fmt, ...) \
- pr_info("%s:: %s %d:" fmt, \
+ pr_info("%s:: %s %d: " fmt, \
(HBA)->name, __func__, __LINE__, ##__VA_ARGS__)
#define pm8001_dbg(HBA, level, fmt, ...) \
@@ -99,11 +101,7 @@ extern const struct pm8001_dispatch pm8001_80xx_dispatch;
struct pm8001_hba_info;
struct pm8001_ccb_info;
struct pm8001_device;
-/* define task management IU */
-struct pm8001_tmf_task {
- u8 tmf;
- u32 tag_of_task_to_be_managed;
-};
+
struct pm8001_ioctl_payload {
u32 signature;
u16 major_function;
@@ -176,6 +174,7 @@ struct forensic_data {
struct pm8001_dispatch {
char *name;
int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
+ void (*chip_post_init)(struct pm8001_hba_info *pm8001_ha);
int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
@@ -200,10 +199,9 @@ struct pm8001_dispatch {
int (*phy_ctl_req)(struct pm8001_hba_info *pm8001_ha,
u32 phy_id, u32 phy_op);
int (*task_abort)(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag,
- u32 cmd_tag);
+ struct pm8001_ccb_info *ccb);
int (*ssp_tm_req)(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf);
+ struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf);
int (*get_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
int (*set_nvmd_req)(struct pm8001_hba_info *pm8001_ha, void *payload);
int (*fw_flash_update_req)(struct pm8001_hba_info *pm8001_ha,
@@ -216,6 +214,9 @@ struct pm8001_dispatch {
u32 state);
int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha);
+ void (*hw_event_ack_req)(struct pm8001_hba_info *pm8001_ha,
+ u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0,
+ u32 param1);
};
struct pm8001_chip_info {
@@ -611,7 +612,7 @@ struct fw_control_info {
operations.*/
u32 reserved;/* padding required for 64 bit
alignment */
- u8 buffer[1];/* Start of buffer */
+ u8 buffer[];/* Start of buffer */
};
struct fw_control_ex {
struct fw_control_info *fw_control;
@@ -638,15 +639,13 @@ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
+ struct pm8001_ccb_info *ccb);
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void pm8001_scan_start(struct Scsi_Host *shost);
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
int pm8001_abort_task(struct sas_task *task);
-int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
-int pm8001_clear_aca(struct domain_device *dev, u8 *lun);
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun);
int pm8001_dev_found(struct domain_device *dev);
void pm8001_dev_gone(struct domain_device *dev);
@@ -665,8 +664,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
- struct inbound_queue_table *circularQ,
- u32 opCode, void *payload, size_t nb,
+ u32 q_index, u32 opCode, void *payload, size_t nb,
u32 responseQueue);
int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
u16 messageSize, void **messagePtr);
@@ -685,10 +683,9 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_ccb_info *ccb,
- struct pm8001_tmf_task *tmf);
+ struct sas_tmf_task *tmf);
int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
- struct pm8001_device *pm8001_dev,
- u8 flag, u32 task_tag, u32 cmd_tag);
+ struct pm8001_ccb_info *ccb);
int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
void pm8001_work_fn(struct work_struct *work);
@@ -735,15 +732,66 @@ void pm8001_free_dev(struct pm8001_device *pm8001_dev);
/* ctl shared API */
extern const struct attribute_group *pm8001_host_groups[];
-static inline void
-pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
- struct sas_task *task, struct pm8001_ccb_info *ccb,
- u32 ccb_idx)
+#define PM8001_INVALID_TAG ((u32)-1)
+
+/*
+ * Allocate a new tag and return the corresponding ccb after initializing it.
+ */
+static inline struct pm8001_ccb_info *
+pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_device *dev, struct sas_task *task)
{
- pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
+ struct pm8001_ccb_info *ccb;
+ u32 tag;
+
+ if (pm8001_tag_alloc(pm8001_ha, &tag)) {
+ pm8001_dbg(pm8001_ha, FAIL, "Failed to allocate a tag\n");
+ return NULL;
+ }
+
+ ccb = &pm8001_ha->ccb_info[tag];
+ ccb->task = task;
+ ccb->n_elem = 0;
+ ccb->ccb_tag = tag;
+ ccb->device = dev;
+ ccb->fw_control_context = NULL;
+ ccb->open_retry = 0;
+
+ return ccb;
+}
+
+/*
+ * Free the tag of an initialized ccb.
+ */
+static inline void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ u32 tag = ccb->ccb_tag;
+
+ /*
+ * Clean up the ccb so that a manual scan of the adapter's ccb_info
+ * array can detect the ccbs that are in use.
+ * See pm8001_open_reject_retry().
+ */
+ ccb->task = NULL;
+ ccb->ccb_tag = PM8001_INVALID_TAG;
+ ccb->device = NULL;
+ ccb->fw_control_context = NULL;
+
+ pm8001_tag_free(pm8001_ha, tag);
+}
+
+static inline void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
+ struct pm8001_ccb_info *ccb)
+{
+ struct sas_task *task = ccb->task;
+
+ pm8001_ccb_task_free(pm8001_ha, ccb);
smp_mb(); /*in order to force CPU ordering*/
task->task_done(task);
}
+void pm8001_setds_completion(struct domain_device *dev);
+void pm8001_tmf_aborted(struct sas_task *task);
#endif
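A minimal usage sketch of the new ccb helpers defined above (example_exec is a hypothetical caller; pm8001_ccb_alloc(), pm8001_deliver_command() and pm8001_ccb_free() are the real helpers, and the pattern mirrors pm8001_task_exec() in pm8001_sas.c):

/* Hypothetical caller, assuming the pm8001 driver headers: allocate a
 * tagged ccb, deliver it to the chip, and return the tag to the pool
 * on failure so it is not leaked.
 */
static int example_exec(struct pm8001_hba_info *pm8001_ha,
			struct pm8001_device *pm8001_dev,
			struct sas_task *task)
{
	struct pm8001_ccb_info *ccb;
	int rc;

	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
	if (!ccb)
		return -SAS_QUEUE_FULL;		/* no free tag */

	task->lldd_task = ccb;

	rc = pm8001_deliver_command(pm8001_ha, ccb);
	if (rc)
		pm8001_ccb_free(pm8001_ha, ccb);	/* tag back to pool */
	return rc;
}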
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 2101fc5761c3..f8b8624458f7 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -42,6 +42,7 @@
#include "pm80xx_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
+#include "pm80xx_tracepoints.h"
#define SMP_DIRECT 1
#define SMP_INDIRECT 2
@@ -66,18 +67,16 @@ int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value)
}
static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset,
- const void *destination,
+ __le32 *destination,
u32 dw_count, u32 bus_base_number)
{
u32 index, value, offset;
- u32 *destination1;
- destination1 = (u32 *)destination;
- for (index = 0; index < dw_count; index += 4, destination1++) {
+ for (index = 0; index < dw_count; index += 4, destination++) {
offset = (soffset + index);
if (offset < (64 * 1024)) {
value = pm8001_cr32(pm8001_ha, bus_base_number, offset);
- *destination1 = cpu_to_le32(value);
+ *destination = cpu_to_le32(value);
}
}
return;
@@ -767,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
+ /* Enable the higher IQs and OQs, 32 to 63, via bit 16 */
+ if (pm8001_ha->max_q_num > 32)
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ 1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
@@ -1028,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
if (0x0000 != gst_len_mpistate)
return -EBUSY;
+ /*
+ * As per the controller datasheet, a minimum 500 ms delay is
+ * required after successful MPI initialization before issuing
+ * commands.
+ */
+ msleep(500);
+
return 0;
}
@@ -1183,7 +1193,6 @@ int
pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
{
struct set_ctrl_cfg_req payload;
- struct inbound_queue_table *circularQ;
int rc;
u32 tag;
u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
@@ -1192,9 +1201,8 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
if (IS_SPCV_12G(pm8001_ha->pdev))
@@ -1202,15 +1210,17 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
else
page_code = THERMAL_PAGE_CODE_8H;
- payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
- (THERMAL_ENABLE << 8) | page_code;
- payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+ payload.cfg_pg[0] =
+ cpu_to_le32((THERMAL_LOG_ENABLE << 9) |
+ (THERMAL_ENABLE << 8) | page_code);
+ payload.cfg_pg[1] =
+ cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8));
pm8001_dbg(pm8001_ha, DEV,
"Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n",
payload.cfg_pg[0], payload.cfg_pg[1]);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1227,7 +1237,6 @@ static int
pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
{
struct set_ctrl_cfg_req payload;
- struct inbound_queue_table *circularQ;
SASProtocolTimerConfig_t SASConfigPage;
int rc;
u32 tag;
@@ -1237,55 +1246,51 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
-
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
- SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE;
- SASConfigPage.MST_MSI = 3 << 15;
- SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO;
- SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) |
- (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
- SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME;
-
- if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
- SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
-
-
- SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) |
- SAS_OPNRJT_RTRY_INTVL;
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16)
- | SAS_COPNRJT_RTRY_TMO;
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16)
- | SAS_COPNRJT_RTRY_THR;
- SASConfigPage.MAX_AIP = SAS_MAX_AIP;
+ SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE);
+ SASConfigPage.MST_MSI = cpu_to_le32(3 << 15);
+ SASConfigPage.STP_SSP_MCT_TMO =
+ cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO);
+ SASConfigPage.STP_FRM_TMO =
+ cpu_to_le32((SAS_MAX_OPEN_TIME << 24) |
+ (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER);
+ SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME);
+
+ SASConfigPage.OPNRJT_RTRY_INTVL =
+ cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL);
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =
+ cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO);
+ SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =
+ cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR);
+ SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP);
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n",
- SASConfigPage.pageCode);
+ le32_to_cpu(SASConfigPage.pageCode));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n",
- SASConfigPage.MST_MSI);
+ le32_to_cpu(SASConfigPage.MST_MSI));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n",
- SASConfigPage.STP_SSP_MCT_TMO);
+ le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n",
- SASConfigPage.STP_FRM_TMO);
+ le32_to_cpu(SASConfigPage.STP_FRM_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n",
- SASConfigPage.STP_IDLE_TMO);
+ le32_to_cpu(SASConfigPage.STP_IDLE_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n",
- SASConfigPage.OPNRJT_RTRY_INTVL);
+ le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n",
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO);
+ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n",
- SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR);
+ le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n",
- SASConfigPage.MAX_AIP);
+ le32_to_cpu(SASConfigPage.MAX_AIP));
memcpy(&payload.cfg_pg, &SASConfigPage,
sizeof(SASProtocolTimerConfig_t));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1391,7 +1396,6 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
{
struct kek_mgmt_req payload;
- struct inbound_queue_table *circularQ;
int rc;
u32 tag;
u32 opc = OPC_INB_KEK_MANAGEMENT;
@@ -1399,21 +1403,21 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
memset(&payload, 0, sizeof(struct kek_mgmt_req));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- return -1;
+ return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
payload.tag = cpu_to_le32(tag);
/* Currently only one key is used. New KEK index is 1.
* Current KEK index is 1. Store KEK to NVRAM is 1.
*/
- payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
- KEK_MGMT_SUBOP_KEYCARDUPDATE);
+ payload.new_curidx_ksop =
+ cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) |
+ KEK_MGMT_SUBOP_KEYCARDUPDATE));
pm8001_dbg(pm8001_ha, DEV,
"Saving Encryption info to flash. payload 0x%x\n",
- payload.new_curidx_ksop);
+ le32_to_cpu(payload.new_curidx_ksop));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -1465,11 +1469,18 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
} else
return -EBUSY;
+ return 0;
+}
+
+static void pm80xx_chip_post_init(struct pm8001_hba_info *pm8001_ha)
+{
/* send SAS protocol timer configuration page to FW */
- ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+ pm80xx_set_sas_protocol_timer_config(pm8001_ha);
/* Check for encryption */
if (pm8001_ha->chip->encrypt) {
+ int ret;
+
pm8001_dbg(pm8001_ha, INIT, "Checking for encryption\n");
ret = pm80xx_get_encrypt_info(pm8001_ha);
if (ret == -1) {
@@ -1481,7 +1492,6 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
}
}
}
- return 0;
}
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
@@ -1551,9 +1561,9 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
{
int ret = 0;
u32 scratch_pad_rsvd0 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_6);
+ MSGU_SCRATCH_PAD_RSVD_0);
u32 scratch_pad_rsvd1 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_7);
+ MSGU_SCRATCH_PAD_RSVD_1);
u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
u32 scratch_pad2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
u32 scratch_pad3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
@@ -1662,9 +1672,9 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
PCI_VENDOR_ID_ATTO &&
pm8001_ha->pdev->subsystem_vendor != 0) {
ibutton0 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_6);
+ MSGU_SCRATCH_PAD_RSVD_0);
ibutton1 = pm8001_cr32(pm8001_ha, 0,
- MSGU_HOST_SCRATCH_PAD_7);
+ MSGU_SCRATCH_PAD_RSVD_1);
if (!ibutton0 && !ibutton1) {
pm8001_dbg(pm8001_ha, FAIL,
"iButton Feature is not Available!!!\n");
@@ -1734,10 +1744,11 @@ static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- mask = (u32)(1 << vec);
-
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+ if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1753,12 +1764,15 @@ static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- if (vec == 0xFF)
- mask = 0xFFFFFFFF;
+ if (vec == 0xFF) {
+ /* disable all vectors 0-31, 32-63 */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+ } else if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
- mask = (u32)(1 << vec);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
@@ -1767,53 +1781,39 @@ pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_ha_dev)
{
- int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
- struct sas_task *task = NULL;
+ struct sas_task *task;
struct task_abort_req task_abort;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_ABORT;
int ret;
- if (!pm8001_ha_dev) {
- pm8001_dbg(pm8001_ha, FAIL, "dev is null\n");
- return;
- }
+ pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG;
+ pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n");
return;
}
-
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
sas_free_task(task);
return;
}
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
-
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
-
memset(&task_abort, 0, sizeof(task_abort));
task_abort.abort_all = cpu_to_le32(1);
task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- task_abort.tag = cpu_to_le32(ccb_tag);
+ task_abort.tag = cpu_to_le32(ccb->ccb_tag);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort,
- sizeof(task_abort), 0);
+ ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort,
+ sizeof(task_abort), 0);
pm8001_dbg(pm8001_ha, FAIL, "Executing abort task end\n");
if (ret) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
}
}
@@ -1822,36 +1822,26 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
{
struct sata_start_req sata_cmd;
int res;
- u32 ccb_tag;
struct pm8001_ccb_info *ccb;
struct sas_task *task = NULL;
struct host_to_dev_fis fis;
struct domain_device *dev;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
task = sas_alloc_slow_task(GFP_ATOMIC);
-
if (!task) {
pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n");
return;
}
task->task_done = pm8001_task_done;
- res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res) {
- sas_free_task(task);
- pm8001_dbg(pm8001_ha, FAIL, "cannot allocate tag !!!\n");
- return;
- }
-
- /* allocate domain device by ourselves as libsas
- * is not going to provide any
- */
+ /*
+ * Allocate the domain device ourselves, as libsas is not going
+ * to provide one.
+ */
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
pm8001_dbg(pm8001_ha, FAIL,
"Domain device cannot be allocated\n");
return;
@@ -1860,16 +1850,17 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
task->dev = dev;
task->dev->lldd_dev = pm8001_ha_dev;
- ccb = &pm8001_ha->ccb_info[ccb_tag];
- ccb->device = pm8001_ha_dev;
- ccb->ccb_tag = ccb_tag;
- ccb->task = task;
- ccb->n_elem = 0;
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task);
+ if (!ccb) {
+ sas_free_task(task);
+ kfree(dev);
+ return;
+ }
+
pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
/* construct read log FIS */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
@@ -1879,17 +1870,17 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
fis.lbal = 0x10;
fis.sector_count = 0x1;
- sata_cmd.tag = cpu_to_le32(ccb_tag);
+ sata_cmd.tag = cpu_to_le32(ccb->ccb_tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
- sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+ sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9)));
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
- res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd,
- sizeof(sata_cmd), 0);
+ res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd,
+ sizeof(sata_cmd), 0);
pm8001_dbg(pm8001_ha, FAIL, "Executing read log end\n");
if (res) {
sas_free_task(task);
- pm8001_tag_free(pm8001_ha, ccb_tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
kfree(dev);
}
}
@@ -2177,21 +2168,18 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
psspPayload->ssp_resp_iu.status);
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
@@ -2361,19 +2349,16 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with event 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- mb();/* in order to force CPU ordering */
- t->task_done(t);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
}
}
@@ -2400,21 +2385,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
+ param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
- if (!tag) {
- pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
- return;
- }
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psataPayload->param);
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
- return;
- }
+ t = ccb->task;
+ pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
@@ -2431,10 +2407,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
}
ts = &t->task_status;
- if (!ts) {
- pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
- return;
- }
if (status != IO_SUCCESS) {
pm8001_dbg(pm8001_ha, FAIL,
@@ -2491,11 +2463,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_SAM_STAT_GOOD;
/* check if response is for SEND READ LOG */
if (pm8001_dev &&
- (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
- /* set new bit for abort_all */
- pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
- /* clear bit for read log */
- pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+ (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
/* Free the tag */
pm8001_tag_free(pm8001_ha, tag);
@@ -2517,7 +2485,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
len = sizeof(struct pio_setup_fis);
pm8001_dbg(pm8001_ha, IO,
"PIO read len = %d\n", len);
- } else if (t->ata_task.use_ncq) {
+ } else if (t->ata_task.use_ncq &&
+ t->data_dir != DMA_NONE) {
len = sizeof(struct set_dev_bits_fis);
pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n",
len);
@@ -2619,7 +2588,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2639,7 +2608,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2667,7 +2636,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2742,7 +2711,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2766,7 +2735,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_QUEUE_FULL;
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
return;
@@ -2794,21 +2763,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irqrestore(&circularQ->oq_lock,
circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
spin_lock_irqsave(&circularQ->oq_lock,
circularQ->lock_flags);
}
@@ -2828,17 +2796,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
-
- ccb = &pm8001_ha->ccb_info[tag];
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
- return;
- }
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2852,6 +2810,10 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
return;
}
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+
if (unlikely(!t || !t->lldd_task || !t->dev)) {
pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
@@ -2866,8 +2828,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2916,11 +2876,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
return;
}
break;
@@ -3020,24 +2975,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
- }
}
/*See the comments for mpi_ssp_completion */
@@ -3237,17 +3174,16 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
spin_lock_irqsave(&t->task_state_lock, flags);
t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
t->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%xstat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
mb();/* in order to force CPU ordering */
t->task_done(t);
}
@@ -3269,17 +3205,15 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
struct hw_event_ack_req payload;
u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
- struct inbound_queue_table *circularQ;
-
memset((u8 *)&payload, 0, sizeof(payload));
- circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
payload.tag = cpu_to_le32(1);
payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
((phyId & 0xFF) << 24) | (port_id & 0xFF));
payload.param0 = cpu_to_le32(param0);
payload.param1 = cpu_to_le32(param1);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ pm8001_mpi_build_cmd(pm8001_ha, Qnum, opc, &payload,
+ sizeof(payload), 0);
}
static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3522,7 +3456,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 status =
le32_to_cpu(pPayload->status);
u32 phy_id =
- le32_to_cpu(pPayload->phyid);
+ le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
pm8001_dbg(pm8001_ha, INIT,
@@ -3724,8 +3658,10 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
- pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
- port_id, phy_id, 0, 0);
+ if (!pm8001_ha->phy[phy_id].reset_completion) {
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ }
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
@@ -3793,8 +3729,12 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, MSG, "phy:0x%x status:0x%x\n",
phyid, status);
if (status == PHY_STOP_SUCCESS ||
- status == PHY_STOP_ERR_DEVICE_ATTACHED)
+ status == PHY_STOP_ERR_DEVICE_ATTACHED) {
phy->phy_state = PHY_LINK_DISABLE;
+ phy->sas_phy.phy->negotiated_linkrate = SAS_PHY_DISABLED;
+ phy->sas_phy.linkrate = SAS_PHY_DISABLED;
+ }
+
return 0;
}
@@ -3941,6 +3881,7 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
/**
* process_one_iomb - process one outbound Queue memory block
* @pm8001_ha: our hba card information
+ * @circularQ: outbound circular queue
* @piomb: IO message buffer
*/
static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
@@ -4148,9 +4089,9 @@ static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
pm8001_dbg(pm8001_ha, FAIL, "MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5));
pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6));
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_0));
pm8001_dbg(pm8001_ha, FAIL, "MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
- pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7));
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_RSVD_1));
}
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
@@ -4161,10 +4102,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
u32 ret = MPI_IO_STATUS_FAIL;
u32 regval;
+ /*
+ * Fatal errors are programmed to be signalled on irq vector
+ * pm8001_ha->max_q_num - 1 through
+ * pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt.
+ */
if (vec == (pm8001_ha->max_q_num - 1)) {
+ u32 mipsall_ready;
+
+ if (pm8001_ha->chip_id == chip_8008 ||
+ pm8001_ha->chip_id == chip_8009)
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT;
+ else
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT;
+
regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
- SCRATCH_PAD_MIPSALL_READY) {
+ if ((regval & mipsall_ready) != mipsall_ready) {
pm8001_ha->controller_fatal_error = true;
pm8001_dbg(pm8001_ha, FAIL,
"Firmware Fatal error! Regval:0x%x\n",
@@ -4172,6 +4125,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
pm8001_handle_event(pm8001_ha, NULL, IO_FATAL_ERROR);
print_scratchpad_registers(pm8001_ha);
return ret;
+ } else {
+ /* read scratchpad rsvd 0 register */
+ regval = pm8001_cr32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_RSVD_0);
+ switch (regval) {
+ case NON_FATAL_SPBC_LBUS_ECC_ERR:
+ case NON_FATAL_BDMA_ERR:
+ case NON_FATAL_THERM_OVERTEMP_ERR:
+ /* Clear the register */
+ pm8001_cw32(pm8001_ha, 0,
+ MSGU_SCRATCH_PAD_RSVD_0,
+ 0x00000000);
+ break;
+ default:
+ break;
+ }
}
}
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
@@ -4243,7 +4212,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
u32 req_len, resp_len;
struct smp_req smp_cmd;
u32 opc;
- struct inbound_queue_table *circularQ;
u32 i, length;
u8 *payload;
u8 *to;
@@ -4272,7 +4240,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
}
opc = OPC_INB_SMP_REQUEST;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
length = sg_req->length;
@@ -4340,8 +4307,8 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
kunmap_atomic(to);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
- sizeof(smp_cmd), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &smp_cmd,
+ sizeof(smp_cmd), 0);
if (rc)
goto err_out_2;
return 0;
@@ -4388,6 +4355,29 @@ static int check_enc_sat_cmd(struct sas_task *task)
return ret;
}
+/*
+ * Select the inbound queue for a task from the blk-mq hardware queue
+ * of its SCSI command; internal commands (no scmd) use queue 0.
+ */
+static u32 pm80xx_chip_get_q_index(struct sas_task *task)
+{
+ struct scsi_cmnd *scmd = NULL;
+ u32 blk_tag;
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(task->dev)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
+ }
+
+ if (!scmd)
+ return 0;
+
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ return blk_mq_unique_tag_to_hwq(blk_tag);
+}
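For reference, the block-layer tag decomposition this relies on (plain blk-mq API; a sketch with illustrative variable names):

	/* blk_mq_unique_tag() packs (hw queue << 16) | per-queue tag;
	 * the helpers below recover the two halves.
	 */
	u32 unique = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
	u32 hwq    = blk_mq_unique_tag_to_hwq(unique);	/* unique >> 16 */
	u32 tag    = blk_mq_unique_tag_to_tag(unique);	/* unique & 0xffff */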
+
/**
* pm80xx_chip_ssp_io_req - send an SSP task to FW
* @pm8001_ha: our hba card information.
@@ -4401,14 +4391,14 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *pm8001_dev = dev->lldd_dev;
struct ssp_ini_io_start_req ssp_cmd;
u32 tag = ccb->ccb_tag;
- int ret;
- u64 phys_addr, start_addr, end_addr;
+ u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
- struct inbound_queue_table *circularQ;
- u32 q_index, cpu_id;
+ u32 q_index;
u32 opc = OPC_INB_SSPINIIOSTART;
+
memset(&ssp_cmd, 0, sizeof(ssp_cmd));
memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+
/* data address domain added for spcv; set to 0 by host,
* used internally by controller
* 0 for SAS 1.1 and SAS 2.0 compatible TLR
@@ -4419,14 +4409,12 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+ ssp_cmd.ssp_iu.efb_prio_attr = 0x80;
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
- cpu_id = smp_processor_id();
- q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
- circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
+ q_index = pm80xx_chip_get_q_index(task);
/* Check if encryption is set */
if (pm8001_ha->chip->encrypt &&
@@ -4451,21 +4439,24 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
ssp_cmd.enc_addr_low =
cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + ssp_cmd.enc_len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != ssp_cmd.enc_addr_high) {
+ end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+
+ if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.enc_len,
+ dma_addr,
+ le32_to_cpu(ssp_cmd.enc_len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
@@ -4474,7 +4465,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
cpu_to_le32(lower_32_bits(phys_addr));
ssp_cmd.enc_addr_high =
cpu_to_le32(upper_32_bits(phys_addr));
- ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+ ssp_cmd.enc_esgl = cpu_to_le32(1U<<31);
}
} else if (task->num_scatter == 0) {
ssp_cmd.enc_addr_low = 0;
@@ -4482,8 +4473,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.enc_esgl = 0;
}
+
/* XTS mode. All other fields are 0 */
- ssp_cmd.key_cmode = 0x6 << 4;
+ ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4);
+
/* set tweak values. Should be the start lba */
ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) |
(task->ssp_task.cmd->cmnd[3] << 16) |
@@ -4505,20 +4498,22 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = cpu_to_le32(1<<31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
ssp_cmd.addr_high =
cpu_to_le32(upper_32_bits(dma_addr));
ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + ssp_cmd.len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != ssp_cmd.addr_high) {
+ end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+ if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, ssp_cmd.len,
+ dma_addr,
+ le32_to_cpu(ssp_cmd.len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
@@ -4536,9 +4531,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.esgl = 0;
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &ssp_cmd, sizeof(ssp_cmd), q_index);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &ssp_cmd,
+ sizeof(ssp_cmd), q_index);
}
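The "Check 4G Boundary" logic above (and again in the SATA path below) reduces to a single test; a sketch using the kernel's upper_32_bits() helper:

/* Sketch only: a single-descriptor DMA buffer must not cross a 4 GiB
 * boundary, i.e. the upper 32 bits of its first and last byte address
 * must match; otherwise the driver falls back to an external SG list.
 */
static bool example_crosses_4g(u64 dma_addr, u32 len)
{
	return upper_32_bits(dma_addr + len - 1) != upper_32_bits(dma_addr);
}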
static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
@@ -4547,39 +4542,35 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
- u32 tag = ccb->ccb_tag;
- int ret;
- u32 q_index, cpu_id;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ u32 tag = ccb->ccb_tag, q_index;
struct sata_start_req sata_cmd;
u32 hdr_tag, ncg_tag = 0;
- u64 phys_addr, start_addr, end_addr;
+ u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
u32 dir;
- struct inbound_queue_table *circularQ;
unsigned long flags;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
- cpu_id = smp_processor_id();
- q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num);
- circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
- if (task->data_dir == DMA_NONE) {
+ q_index = pm80xx_chip_get_q_index(task);
+
+ if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) {
ATAP = 0x04; /* no data*/
pm8001_dbg(pm8001_ha, IO, "no data\n");
} else if (likely(!task->ata_task.device_control_reg_update)) {
- if (task->ata_task.dma_xfer) {
+ if (task->ata_task.use_ncq &&
+ dev->sata_dev.class != ATA_DEV_ATAPI) {
+ ATAP = 0x07; /* FPDMA */
+ pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
+ } else if (task->ata_task.dma_xfer) {
ATAP = 0x06; /* DMA */
pm8001_dbg(pm8001_ha, IO, "DMA\n");
} else {
ATAP = 0x05; /* PIO*/
pm8001_dbg(pm8001_ha, IO, "PIO\n");
}
- if (task->ata_task.use_ncq &&
- dev->sata_dev.class != ATA_DEV_ATAPI) {
- ATAP = 0x07; /* FPDMA */
- pm8001_dbg(pm8001_ha, IO, "FPDMA\n");
- }
}
if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
@@ -4613,32 +4604,38 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
pm8001_chip_make_sg(task->scatter,
ccb->n_elem, ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
- sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
- sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+ sata_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(phys_addr));
+ sata_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(phys_addr));
sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
- sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
- sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+
+ sata_cmd.enc_addr_low =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ sata_cmd.enc_addr_high =
+ cpu_to_le32(upper_32_bits(dma_addr));
sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
sata_cmd.enc_esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + sata_cmd.enc_len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
- if (end_addr_high != sata_cmd.enc_addr_high) {
+ end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
+ if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.enc_len,
+ dma_addr,
+ le32_to_cpu(sata_cmd.enc_len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
sata_cmd.enc_addr_low =
- lower_32_bits(phys_addr);
+ cpu_to_le32(lower_32_bits(phys_addr));
sata_cmd.enc_addr_high =
- upper_32_bits(phys_addr);
+ cpu_to_le32(upper_32_bits(phys_addr));
sata_cmd.enc_esgl =
cpu_to_le32(1 << 31);
}
@@ -4649,7 +4646,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.enc_esgl = 0;
}
/* XTS mode. All other fields are 0 */
- sata_cmd.key_index_mode = 0x6 << 4;
+ sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4);
+
/* set tweak values. Should be the start lba */
sata_cmd.twk_val0 =
cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
@@ -4675,31 +4673,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
phys_addr = ccb->ccb_dma_handle;
sata_cmd.addr_low = lower_32_bits(phys_addr);
sata_cmd.addr_high = upper_32_bits(phys_addr);
- sata_cmd.esgl = cpu_to_le32(1 << 31);
+ sata_cmd.esgl = cpu_to_le32(1U << 31);
} else if (task->num_scatter == 1) {
u64 dma_addr = sg_dma_address(task->scatter);
+
sata_cmd.addr_low = lower_32_bits(dma_addr);
sata_cmd.addr_high = upper_32_bits(dma_addr);
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
+
/* Check 4G Boundary */
- start_addr = cpu_to_le64(dma_addr);
- end_addr = (start_addr + sata_cmd.len) - 1;
- end_addr_low = cpu_to_le32(lower_32_bits(end_addr));
- end_addr_high = cpu_to_le32(upper_32_bits(end_addr));
+ end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1;
+ end_addr_low = lower_32_bits(end_addr);
+ end_addr_high = upper_32_bits(end_addr);
if (end_addr_high != sata_cmd.addr_high) {
pm8001_dbg(pm8001_ha, FAIL,
"The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n",
- start_addr, sata_cmd.len,
+ dma_addr,
+ le32_to_cpu(sata_cmd.len),
end_addr_high, end_addr_low);
pm8001_chip_make_sg(task->scatter, 1,
ccb->buf_prd);
phys_addr = ccb->ccb_dma_handle;
- sata_cmd.addr_low =
- lower_32_bits(phys_addr);
- sata_cmd.addr_high =
- upper_32_bits(phys_addr);
- sata_cmd.esgl = cpu_to_le32(1 << 31);
+ sata_cmd.addr_low = lower_32_bits(phys_addr);
+ sata_cmd.addr_high = upper_32_bits(phys_addr);
+ sata_cmd.esgl = cpu_to_le32(1U << 31);
}
} else if (task->num_scatter == 0) {
sata_cmd.addr_low = 0;
@@ -4707,27 +4705,28 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.len = cpu_to_le32(task->total_xfer_len);
sata_cmd.esgl = 0;
}
+
/* scsi cdb */
sata_cmd.atapi_scsi_cdb[0] =
cpu_to_le32(((task->ata_task.atapi_packet[0]) |
- (task->ata_task.atapi_packet[1] << 8) |
- (task->ata_task.atapi_packet[2] << 16) |
- (task->ata_task.atapi_packet[3] << 24)));
+ (task->ata_task.atapi_packet[1] << 8) |
+ (task->ata_task.atapi_packet[2] << 16) |
+ (task->ata_task.atapi_packet[3] << 24)));
sata_cmd.atapi_scsi_cdb[1] =
cpu_to_le32(((task->ata_task.atapi_packet[4]) |
- (task->ata_task.atapi_packet[5] << 8) |
- (task->ata_task.atapi_packet[6] << 16) |
- (task->ata_task.atapi_packet[7] << 24)));
+ (task->ata_task.atapi_packet[5] << 8) |
+ (task->ata_task.atapi_packet[6] << 16) |
+ (task->ata_task.atapi_packet[7] << 24)));
sata_cmd.atapi_scsi_cdb[2] =
cpu_to_le32(((task->ata_task.atapi_packet[8]) |
- (task->ata_task.atapi_packet[9] << 8) |
- (task->ata_task.atapi_packet[10] << 16) |
- (task->ata_task.atapi_packet[11] << 24)));
+ (task->ata_task.atapi_packet[9] << 8) |
+ (task->ata_task.atapi_packet[10] << 16) |
+ (task->ata_task.atapi_packet[11] << 24)));
sata_cmd.atapi_scsi_cdb[3] =
cpu_to_le32(((task->ata_task.atapi_packet[12]) |
- (task->ata_task.atapi_packet[13] << 8) |
- (task->ata_task.atapi_packet[14] << 16) |
- (task->ata_task.atapi_packet[15] << 24)));
+ (task->ata_task.atapi_packet[13] << 8) |
+ (task->ata_task.atapi_packet[14] << 16) |
+ (task->ata_task.atapi_packet[15] << 24)));
}
/* Check for read log for failed drive and return */
@@ -4744,7 +4743,6 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_SAM_STAT_GOOD;
task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
task->task_state_flags |= SAS_TASK_STATE_DONE;
if (unlikely((task->task_state_flags &
SAS_TASK_STATE_ABORTED))) {
@@ -4754,21 +4752,24 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n",
task, ts->resp,
ts->stat);
- pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+ pm8001_ccb_task_free(pm8001_ha, ccb);
return 0;
} else {
spin_unlock_irqrestore(&task->task_state_lock,
flags);
- pm8001_ccb_task_free_done(pm8001_ha, task,
- ccb, tag);
+ pm8001_ccb_task_free_done(pm8001_ha, ccb);
atomic_dec(&pm8001_ha_dev->running_req);
return 0;
}
}
}
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
- &sata_cmd, sizeof(sata_cmd), q_index);
- return ret;
+ trace_pm80xx_request_issue(pm8001_ha->id,
+ ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS,
+ ccb->ccb_tag, opc,
+		qc ? qc->tf.command : 0, /* ata opcode */
+ ccb->device ? atomic_read(&ccb->device->running_req) : 0);
+ return pm8001_mpi_build_cmd(pm8001_ha, q_index, opc, &sata_cmd,
+ sizeof(sata_cmd), q_index);
}
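
The queue selection removed at the top of this function was a plain CPU-to-queue modulo; the new pm80xx_chip_get_q_index() helper is assumed to encapsulate an equivalent mapping for the task. A sketch of the retired logic for reference (not the new helper's implementation):

static u32 legacy_sata_q_index(struct pm8001_hba_info *pm8001_ha)
{
	/* Spread inbound commands across queues by submitting CPU;
	 * smp_processor_id() mirrors the removed code, including its
	 * caveat that the CPU may change under preemption. */
	return (u32)smp_processor_id() % pm8001_ha->max_q_num;
}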
/**
@@ -4780,11 +4781,9 @@ static int
pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
{
struct phy_start_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTART;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
@@ -4806,9 +4805,9 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
memcpy(payload.sas_identify.sas_addr,
&pm8001_ha->sas_addr, SAS_ADDR_SIZE);
payload.sas_identify.phy_id = phy_id;
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/**
@@ -4820,17 +4819,15 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
u8 phy_id)
{
struct phy_stop_req payload;
- struct inbound_queue_table *circularQ;
- int ret;
u32 tag = 0x01;
u32 opcode = OPC_INB_PHYSTOP;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
memset(&payload, 0, sizeof(payload));
payload.tag = cpu_to_le32(tag);
payload.phy_id = cpu_to_le32(phy_id);
- ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload,
- sizeof(payload), 0);
- return ret;
+
+ return pm8001_mpi_build_cmd(pm8001_ha, 0, opcode, &payload,
+ sizeof(payload), 0);
}
/*
@@ -4842,9 +4839,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
struct reg_dev_req payload;
u32 opc;
u32 stp_sspsmp_sata = 0x4;
- struct inbound_queue_table *circularQ;
u32 linkrate, phy_id;
- int rc, tag = 0xdeadbeef;
+ int rc;
struct pm8001_ccb_info *ccb;
u8 retryFlag = 0x1;
u16 firstBurstSize = 0;
@@ -4852,16 +4848,13 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
memset(&payload, 0, sizeof(payload));
- rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
- return rc;
- ccb = &pm8001_ha->ccb_info[tag];
- ccb->device = pm8001_dev;
- ccb->ccb_tag = tag;
- payload.tag = cpu_to_le32(tag);
+ ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
+ if (!ccb)
+ return -SAS_QUEUE_FULL;
+
+ payload.tag = cpu_to_le32(ccb->ccb_tag);
if (flag == 1) {
stp_sspsmp_sata = 0x02; /*direct attached sata */
@@ -4895,10 +4888,10 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
- pm8001_tag_free(pm8001_ha, tag);
+ pm8001_ccb_free(pm8001_ha, ccb);
return rc;
}
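
The conversion above collapses the open-coded tag_alloc plus ccb_info indexing into a single helper pair. The resulting lifecycle, condensed (all calls as they appear in this patch):

/* Allocate: draws a tag and binds the device in one step. */
ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, NULL);
if (!ccb)
	return -SAS_QUEUE_FULL;

payload.tag = cpu_to_le32(ccb->ccb_tag);

/* Submit; on failure the CCB (and its tag) must be returned. */
rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, sizeof(payload), 0);
if (rc)
	pm8001_ccb_free(pm8001_ha, ccb);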
@@ -4915,18 +4908,23 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
u32 tag;
int rc;
struct local_phy_ctl_req payload;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
return rc;
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
payload.tag = cpu_to_le32(tag);
payload.phyop_phyid =
cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
- return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
+
+ return rc;
}
static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha)
@@ -4967,25 +4965,27 @@ static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
u32 tag, i, j = 0;
int rc;
struct set_phy_profile_req payload;
- struct inbound_queue_table *circularQ;
u32 opc = OPC_INB_SET_PHY_PROFILE;
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
+ if (rc) {
pm8001_dbg(pm8001_ha, FAIL, "Invalid tag\n");
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
+ return;
+ }
+
payload.tag = cpu_to_le32(tag);
- payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid & 0xFF));
+ payload.ppc_phyid =
+ cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF));
pm8001_dbg(pm8001_ha, INIT,
" phy profile command for phy %x ,length is %d\n",
- payload.ppc_phyid, length);
+ le32_to_cpu(payload.ppc_phyid), length);
for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
- payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
+ payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
- sizeof(payload), 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
+ sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
}
@@ -5009,25 +5009,26 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
u32 tag, opc;
int rc, i;
struct set_phy_profile_req payload;
- struct inbound_queue_table *circularQ;
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
- if (rc)
+ if (rc) {
pm8001_dbg(pm8001_ha, INIT, "Invalid tag\n");
+ return;
+ }
- circularQ = &pm8001_ha->inbnd_q_tbl[0];
opc = OPC_INB_SET_PHY_PROFILE;
payload.tag = cpu_to_le32(tag);
- payload.ppc_phyid = (((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8)
- | (phy & 0xFF));
+ payload.ppc_phyid =
+ cpu_to_le32(((SAS_PHY_ANALOG_SETTINGS_PAGE & 0xF) << 8)
+ | (phy & 0xFF));
for (i = 0; i < length; i++)
payload.reserved[i] = cpu_to_le32(*(buf + i));
- rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
+ rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
pm8001_tag_free(pm8001_ha, tag);
@@ -5037,6 +5038,7 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha,
const struct pm8001_dispatch pm8001_80xx_dispatch = {
.name = "pmc80xx",
.chip_init = pm80xx_chip_init,
+ .chip_post_init = pm80xx_chip_post_init,
.chip_soft_rst = pm80xx_chip_soft_rst,
.chip_rst = pm80xx_hw_chip_rst,
.chip_iounmap = pm8001_chip_iounmap,
@@ -5061,4 +5063,5 @@ const struct pm8001_dispatch pm8001_80xx_dispatch = {
.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
.set_dev_state_req = pm8001_chip_set_dev_state_req,
.fatal_errors = pm80xx_fatal_errors,
+ .hw_event_ack_req = pm80xx_hw_event_ack_req,
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index c7e5d93bea92..acf6e3005b84 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -672,11 +672,6 @@ struct task_abort_req {
u32 reserved[27];
} __attribute__((packed, aligned(4)));
-/* These flags used for SSP SMP & SATA Abort */
-#define ABORT_MASK 0x3
-#define ABORT_SINGLE 0x0
-#define ABORT_ALL 0x1
-
/**
* brief the data structure of SSP SATA SMP Abort Response
 * used to describe SSP SMP & SATA Abort Response (64 bytes)
@@ -972,7 +967,7 @@ struct dek_mgmt_req {
struct set_phy_profile_req {
__le32 tag;
__le32 ppc_phyid;
- u32 reserved[29];
+ __le32 reserved[29];
} __attribute__((packed, aligned(4)));
/**
@@ -1366,8 +1361,8 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define MSGU_HOST_SCRATCH_PAD_3 0x60
#define MSGU_HOST_SCRATCH_PAD_4 0x64
#define MSGU_HOST_SCRATCH_PAD_5 0x68
-#define MSGU_HOST_SCRATCH_PAD_6 0x6C
-#define MSGU_HOST_SCRATCH_PAD_7 0x70
+#define MSGU_SCRATCH_PAD_RSVD_0 0x6C
+#define MSGU_SCRATCH_PAD_RSVD_1 0x70
#define MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) ((x & 0x3) == 0x2)
#define MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) (((x >> 2) & 0x3) == 0x2)
@@ -1405,8 +1400,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
-#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \
SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
+ SCRATCH_PAD_RAAE_READY)
+#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
SCRATCH_PAD_RAAE_READY)
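
Splitting the single MIPSALL mask lets 8-port controllers, which have no second IOP, declare readiness without waiting on IOP1. A hedged sketch of how an init path might consume the two masks (the is_16port predicate is hypothetical):

static bool pm80xx_mips_all_ready(u32 scratch_pad1, bool is_16port)
{
	u32 mask = is_16port ? SCRATCH_PAD_MIPSALL_READY_16PORT :
			       SCRATCH_PAD_MIPSALL_READY_8PORT;

	/* All required cores (RAAE, ILA, IOP0[, IOP1]) must report up. */
	return (scratch_pad1 & mask) == mask;
}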
/* boot loader state */
@@ -1435,6 +1434,11 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */
#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */
+/*state definition for Scratchpad Rsvd 0, Offset 0x6C, Non-fatal*/
+#define NON_FATAL_SPBC_LBUS_ECC_ERR 0x70000001
+#define NON_FATAL_BDMA_ERR 0xE0000001
+#define NON_FATAL_THERM_OVERTEMP_ERR 0x80000001
+
/* main configuration offset - byte offset */
#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */
#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */
diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.c b/drivers/scsi/pm8001/pm80xx_tracepoints.c
new file mode 100644
index 000000000000..344aface9cdb
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_tracepoints.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Trace events in pm8001 driver.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Akshat Jain <akshatzen@google.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "pm80xx_tracepoints.h"
diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.h b/drivers/scsi/pm8001/pm80xx_tracepoints.h
new file mode 100644
index 000000000000..5e669a8a9344
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_tracepoints.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events in pm8001 driver.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Akshat Jain <akshatzen@google.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pm80xx
+
+#if !defined(_TRACE_PM80XX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PM80XX_H
+
+#include <linux/tracepoint.h>
+#include "pm8001_sas.h"
+
+TRACE_EVENT(pm80xx_request_issue,
+ TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
+ u16 ata_opcode, int running_req),
+
+ TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, phy_id)
+ __field(u32, htag)
+ __field(u32, ctlr_opcode)
+ __field(u16, ata_opcode)
+ __field(int, running_req)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->phy_id = phy_id;
+ __entry->htag = htag;
+ __entry->ctlr_opcode = ctlr_opcode;
+ __entry->ata_opcode = ata_opcode;
+ __entry->running_req = running_req;
+ ),
+
+ TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
+ __entry->id, __entry->phy_id, __entry->htag,
+ __entry->ctlr_opcode, __entry->ata_opcode,
+ __entry->running_req)
+);
+
+TRACE_EVENT(pm80xx_request_complete,
+ TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
+ u16 ata_opcode, int running_req),
+
+ TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, phy_id)
+ __field(u32, htag)
+ __field(u32, ctlr_opcode)
+ __field(u16, ata_opcode)
+ __field(int, running_req)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->phy_id = phy_id;
+ __entry->htag = htag;
+ __entry->ctlr_opcode = ctlr_opcode;
+ __entry->ata_opcode = ata_opcode;
+ __entry->running_req = running_req;
+ ),
+
+ TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
+ __entry->id, __entry->phy_id, __entry->htag,
+ __entry->ctlr_opcode, __entry->ata_opcode,
+ __entry->running_req)
+);
+
+TRACE_EVENT(pm80xx_mpi_build_cmd,
+ TP_PROTO(u32 id, u32 opc, u32 htag, u32 qi, u32 pi, u32 ci),
+
+ TP_ARGS(id, opc, htag, qi, pi, ci),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, opc)
+ __field(u32, htag)
+ __field(u32, qi)
+ __field(u32, pi)
+ __field(u32, ci)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->opc = opc;
+ __entry->htag = htag;
+ __entry->qi = qi;
+ __entry->pi = pi;
+ __entry->ci = ci;
+ ),
+
+ TP_printk("ctlr_id = %u opc = %#x htag = %#x QI = %u PI = %u CI = %u",
+ __entry->id, __entry->opc, __entry->htag, __entry->qi,
+ __entry->pi, __entry->ci)
+);
+
+#endif /* _TRACE_PM80XX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE pm80xx_tracepoints
+
+#include <trace/define_trace.h>
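
For context, these events are fired like any other kernel tracepoint and toggled at runtime through tracefs; a usage sketch (not part of the patch), assuming tracefs is mounted at /sys/kernel/tracing:

/* Emitting an event from the submission path, with values taken from
 * the surrounding request: */
trace_pm80xx_request_issue(pm8001_ha->id, phy_id, ccb->ccb_tag,
			   opc, ata_opcode, running_req);

/*
 * Enabling and reading the events from userspace:
 *   echo 1 > /sys/kernel/tracing/events/pm80xx/enable
 *   cat /sys/kernel/tracing/trace_pipe
 */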
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 88046a793767..836ddc476764 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1434,7 +1434,7 @@ static int pmcraid_notify_aen(
return -EINVAL;
}
- /* send genetlink multicast message to notify appplications */
+ /* send genetlink multicast message to notify applications */
genlmsg_end(skb, msg_header);
result = genlmsg_multicast(&pmcraid_event_family, skb,
@@ -3182,127 +3182,8 @@ static int pmcraid_build_ioadl(
}
/**
- * pmcraid_free_sglist - Frees an allocated SG buffer list
- * @sglist: scatter/gather list pointer
- *
- * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
- *
- * Return value:
- * none
- */
-static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
-{
- sgl_free_order(sglist->scatterlist, sglist->order);
- kfree(sglist);
-}
-
-/**
- * pmcraid_alloc_sglist - Allocates memory for a SG list
- * @buflen: buffer length
- *
- * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
- * list.
- *
- * Return value
- * pointer to sglist / NULL on failure
- */
-static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
-{
- struct pmcraid_sglist *sglist;
- int sg_size;
- int order;
-
- sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
- order = (sg_size > 0) ? get_order(sg_size) : 0;
-
- /* Allocate a scatter/gather list for the DMA */
- sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL);
- if (sglist == NULL)
- return NULL;
-
- sglist->order = order;
- sgl_alloc_order(buflen, order, false,
- GFP_KERNEL | GFP_DMA | __GFP_ZERO, &sglist->num_sg);
-
- return sglist;
-}
-
-/**
- * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
- * @sglist: scatter/gather list pointer
- * @buffer: buffer pointer
- * @len: buffer length
- * @direction: data transfer direction
- *
- * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
- *
- * Return value:
- * 0 on success / other on failure
- */
-static int pmcraid_copy_sglist(
- struct pmcraid_sglist *sglist,
- void __user *buffer,
- u32 len,
- int direction
-)
-{
- struct scatterlist *sg;
- void *kaddr;
- int bsize_elem;
- int i;
- int rc = 0;
-
- /* Determine the actual number of bytes per element */
- bsize_elem = PAGE_SIZE * (1 << sglist->order);
-
- sg = sglist->scatterlist;
-
- for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, bsize_elem);
-
- kunmap(page);
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- return -EFAULT;
- }
-
- sg->length = bsize_elem;
- }
-
- if (len % bsize_elem) {
- struct page *page = sg_page(sg);
-
- kaddr = kmap(page);
-
- if (direction == DMA_TO_DEVICE)
- rc = copy_from_user(kaddr, buffer, len % bsize_elem);
- else
- rc = copy_to_user(buffer, kaddr, len % bsize_elem);
-
- kunmap(page);
-
- sg->length = len % bsize_elem;
- }
-
- if (rc) {
- pmcraid_err("failed to copy user data into sg list\n");
- rc = -EFAULT;
- }
-
- return rc;
-}
-
-/**
* pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
- * @done: done function
*
* This function queues a request generated by the mid-layer. Midlayer calls
* this routine within host->lock. Some of the functions called by queuecommand
@@ -3455,365 +3336,6 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
return rc;
}
-
-/**
- * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
- * commands sent over IOCTL interface
- *
- * @cmd : pointer to struct pmcraid_cmd
- * @buflen : length of the request buffer
- * @direction : data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static int pmcraid_build_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = NULL;
- struct scatterlist *sg = NULL;
- struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
- struct pmcraid_ioadl_desc *ioadl;
- int i;
-
- sglist = pmcraid_alloc_sglist(buflen);
-
- if (!sglist) {
- pmcraid_err("can't allocate memory for passthrough SGls\n");
- return -ENOMEM;
- }
-
- sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg, direction);
-
- if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
- dev_err(&cmd->drv_inst->pdev->dev,
- "Failed to map passthrough buffer!\n");
- pmcraid_free_sglist(sglist);
- return -EIO;
- }
-
- cmd->sglist = sglist;
- ioarcb->request_flags0 |= NO_LINK_DESCS;
-
- ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
-
- /* Initialize IOADL descriptor addresses */
- for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
- ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
- ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
- ioadl[i].flags = 0;
- }
-
- /* setup the last descriptor */
- ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
-
- return 0;
-}
-
-
-/**
- * pmcraid_release_passthrough_ioadls - release passthrough ioadls
- *
- * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
- * @buflen: size of the request buffer
- * @direction: data transfer direction
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static void pmcraid_release_passthrough_ioadls(
- struct pmcraid_cmd *cmd,
- int buflen,
- int direction
-)
-{
- struct pmcraid_sglist *sglist = cmd->sglist;
-
- if (buflen > 0) {
- dma_unmap_sg(&cmd->drv_inst->pdev->dev,
- sglist->scatterlist,
- sglist->num_sg,
- direction);
- pmcraid_free_sglist(sglist);
- cmd->sglist = NULL;
- }
-}
-
-/**
- * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
- *
- * @pinstance: pointer to adapter instance structure
- * @ioctl_cmd: ioctl code
- * @buflen: unused
- * @arg: pointer to pmcraid_passthrough_buffer user buffer
- *
- * Return value
- * 0 on success, non-zero error code on failure
- */
-static long pmcraid_ioctl_passthrough(
- struct pmcraid_instance *pinstance,
- unsigned int ioctl_cmd,
- unsigned int buflen,
- void __user *arg
-)
-{
- struct pmcraid_passthrough_ioctl_buffer *buffer;
- struct pmcraid_ioarcb *ioarcb;
- struct pmcraid_cmd *cmd;
- struct pmcraid_cmd *cancel_cmd;
- void __user *request_buffer;
- unsigned long request_offset;
- unsigned long lock_flags;
- void __user *ioasa;
- u32 ioasc;
- int request_size;
- int buffer_size;
- u8 direction;
- int rc = 0;
-
- /* If IOA reset is in progress, wait 10 secs for reset to complete */
- if (pinstance->ioa_reset_in_progress) {
- rc = wait_event_interruptible_timeout(
- pinstance->reset_wait_q,
- !pinstance->ioa_reset_in_progress,
- msecs_to_jiffies(10000));
-
- if (!rc)
- return -ETIMEDOUT;
- else if (rc < 0)
- return -ERESTARTSYS;
- }
-
- /* If adapter is not in operational state, return error */
- if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
- pmcraid_err("IOA is not operational\n");
- return -ENOTTY;
- }
-
- buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
- buffer = kmalloc(buffer_size, GFP_KERNEL);
-
- if (!buffer) {
- pmcraid_err("no memory for passthrough buffer\n");
- return -ENOMEM;
- }
-
- request_offset =
- offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
-
- request_buffer = arg + request_offset;
-
- rc = copy_from_user(buffer, arg,
- sizeof(struct pmcraid_passthrough_ioctl_buffer));
-
- ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
-
- if (rc) {
- pmcraid_err("ioctl: can't copy passthrough buffer\n");
- rc = -EFAULT;
- goto out_free_buffer;
- }
-
- request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
-
- if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
- direction = DMA_TO_DEVICE;
- } else {
- direction = DMA_FROM_DEVICE;
- }
-
- if (request_size < 0) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- /* check if we have any additional command parameters */
- if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
- > PMCRAID_ADD_CMD_PARAM_LEN) {
- rc = -EINVAL;
- goto out_free_buffer;
- }
-
- cmd = pmcraid_get_free_cmd(pinstance);
-
- if (!cmd) {
- pmcraid_err("free command block is not available\n");
- rc = -ENOMEM;
- goto out_free_buffer;
- }
-
- cmd->scsi_cmd = NULL;
- ioarcb = &(cmd->ioa_cb->ioarcb);
-
- /* Copy the user-provided IOARCB stuff field by field */
- ioarcb->resource_handle = buffer->ioarcb.resource_handle;
- ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
- ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
- ioarcb->request_type = buffer->ioarcb.request_type;
- ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
- ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
- memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
-
- if (buffer->ioarcb.add_cmd_param_length) {
- ioarcb->add_cmd_param_length =
- buffer->ioarcb.add_cmd_param_length;
- ioarcb->add_cmd_param_offset =
- buffer->ioarcb.add_cmd_param_offset;
- memcpy(ioarcb->add_data.u.add_cmd_params,
- buffer->ioarcb.add_data.u.add_cmd_params,
- le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
- }
-
- /* set hrrq number where the IOA should respond to. Note that all cmds
- * generated internally uses hrrq_id 0, exception to this is the cmd
- * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
- * hrrq_id assigned here in queuecommand
- */
- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
- pinstance->num_hrrq;
-
- if (request_size) {
- rc = pmcraid_build_passthrough_ioadls(cmd,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("couldn't build passthrough ioadls\n");
- goto out_free_cmd;
- }
- }
-
- /* If data is being written into the device, copy the data from user
- * buffers
- */
- if (direction == DMA_TO_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- goto out_free_sglist;
- }
- }
-
- /* passthrough ioctl is a blocking command so, put the user to sleep
- * until timeout. Note that a timeout value of 0 means, do timeout.
- */
- cmd->cmd_done = pmcraid_internal_done;
- init_completion(&cmd->wait_for_completion);
- cmd->completion_req = 1;
-
- pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0],
- le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- _pmcraid_fire_command(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- /* NOTE ! Remove the below line once abort_task is implemented
- * in firmware. This line disables ioctl command timeout handling logic
- * similar to IO command timeout handling, making ioctl commands to wait
- * until the command completion regardless of timeout value specified in
- * ioarcb
- */
- buffer->ioarcb.cmd_timeout = 0;
-
- /* If command timeout is specified put caller to wait till that time,
- * otherwise it would be blocking wait. If command gets timed out, it
- * will be aborted.
- */
- if (buffer->ioarcb.cmd_timeout == 0) {
- wait_for_completion(&cmd->wait_for_completion);
- } else if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
-
- pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
- le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
- cmd->ioa_cb->ioarcb.cdb[0]);
-
- spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
- cancel_cmd = pmcraid_abort_cmd(cmd);
- spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
- if (cancel_cmd) {
- wait_for_completion(&cancel_cmd->wait_for_completion);
- ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
- pmcraid_return_cmd(cancel_cmd);
-
- /* if abort task couldn't find the command i.e it got
- * completed prior to aborting, return good completion.
- * if command got aborted successfully or there was IOA
- * reset due to abort task itself getting timedout then
- * return -ETIMEDOUT
- */
- if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
- PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
- if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
- rc = -ETIMEDOUT;
- goto out_handle_response;
- }
- }
-
- /* no command block for abort task or abort task failed to abort
- * the IOARCB, then wait for 150 more seconds and initiate reset
- * sequence after timeout
- */
- if (!wait_for_completion_timeout(
- &cmd->wait_for_completion,
- msecs_to_jiffies(150 * 1000))) {
- pmcraid_reset_bringup(cmd->drv_inst);
- rc = -ETIMEDOUT;
- }
- }
-
-out_handle_response:
- /* copy entire IOASA buffer and return IOCTL success.
- * If copying IOASA to user-buffer fails, return
- * EFAULT
- */
- if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
- sizeof(struct pmcraid_ioasa))) {
- pmcraid_err("failed to copy ioasa buffer to user\n");
- rc = -EFAULT;
- }
-
- /* If the data transfer was from device, copy the data onto user
- * buffers
- */
- else if (direction == DMA_FROM_DEVICE && request_size > 0) {
- rc = pmcraid_copy_sglist(cmd->sglist,
- request_buffer,
- request_size,
- direction);
- if (rc) {
- pmcraid_err("failed to copy user buffer\n");
- rc = -EFAULT;
- }
- }
-
-out_free_sglist:
- pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
-
-out_free_cmd:
- pmcraid_return_cmd(cmd);
-
-out_free_buffer:
- kfree(buffer);
-
- return rc;
-}
-
-
-
-
/**
* pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
*
@@ -3923,20 +3445,6 @@ static long pmcraid_chr_ioctl(
switch (_IOC_TYPE(cmd)) {
- case PMCRAID_PASSTHROUGH_IOCTL:
- /* If ioctl code is to download microcode, we need to block
- * mid-layer requests.
- */
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_block_requests(pinstance->host);
-
- retval = pmcraid_ioctl_passthrough(pinstance, cmd,
- hdr->buffer_length, argp);
-
- if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
- scsi_unblock_requests(pinstance->host);
- break;
-
case PMCRAID_DRIVER_IOCTL:
arg += sizeof(struct pmcraid_ioctl_header);
retval = pmcraid_ioctl_driver(pinstance, cmd,
@@ -4523,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
return 0;
out_unwind:
- while (--i > 0)
+ while (--i >= 0)
free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
pci_free_irq_vectors(pdev);
return rc;
@@ -5082,7 +4590,7 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
pint_regs->global_interrupt_mask_reg =
mapped_pci_addr + chip_cfg->global_intr_mask;
- };
+ }
pinstance->ioa_reset_attempts = 0;
init_waitqueue_head(&pinstance->reset_wait_q);
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index bbb75318f1e7..9f59930e8b4f 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -1023,40 +1023,15 @@ struct pmcraid_ioctl_header {
#define PMCRAID_IOCTL_SIGNATURE "PMCRAID"
/*
- * pmcraid_passthrough_ioctl_buffer - structure given as argument to
- * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
- * 32-byte alignment so, it is necessary to pack this structure to avoid any
- * holes between ioctl_header and passthrough buffer
- *
- * .ioactl_header : ioctl header
- * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer
- * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware
- * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
- * the transfer directions passed in ioarcb.flags0. Contents
- * of this buffer are valid only when ioarcb.data_transfer_len
- * is not zero.
- */
-struct pmcraid_passthrough_ioctl_buffer {
- struct pmcraid_ioctl_header ioctl_header;
- struct pmcraid_ioarcb ioarcb;
- struct pmcraid_ioasa ioasa;
- u8 request_buffer[];
-} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
-
-/*
* keys to differentiate between driver handled IOCTLs and passthrough
* IOCTLs passed to IOA. driver determines the ioctl type using macro
* _IOC_TYPE
*/
#define PMCRAID_DRIVER_IOCTL 'D'
-#define PMCRAID_PASSTHROUGH_IOCTL 'F'
#define DRV_IOCTL(n, size) \
_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
-#define FMW_IOCTL(n, size) \
- _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size))
-
/*
* _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
* This is to facilitate applications avoiding un-necessary memory allocations.
@@ -1069,12 +1044,4 @@ struct pmcraid_passthrough_ioctl_buffer {
#define PMCRAID_IOCTL_RESET_ADAPTER \
DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
-/* passthrough/firmware handled commands */
-#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \
- FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \
- FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-
#endif /* _PMCRAID_H */
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 003043de23a5..c6c1bc608224 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -45,6 +45,11 @@ typedef struct {
#include "ppa.h"
+static struct scsi_pointer *ppa_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
static inline ppa_struct *ppa_dev(struct Scsi_Host *host)
{
return *(ppa_struct **)&host->hostdata;
@@ -56,7 +61,7 @@ static void got_it(ppa_struct *dev)
{
dev->base = dev->dev->port->base;
if (dev->cur_cmd)
- dev->cur_cmd->SCp.phase = 1;
+ ppa_scsi_pointer(dev->cur_cmd)->phase = 1;
else
wake_up(dev->waiting);
}
@@ -511,13 +516,14 @@ static inline int ppa_send_command(struct scsi_cmnd *cmd)
* The driver appears to remain stable if we speed up the parallel port
* i/o in this function, but not elsewhere.
*/
-static int ppa_completion(struct scsi_cmnd *cmd)
+static int ppa_completion(struct scsi_cmnd *const cmd)
{
/* Return codes:
* -1 Error
* 0 Told to schedule
* 1 Finished data transfer
*/
+ struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd);
ppa_struct *dev = ppa_dev(cmd->device->host);
unsigned short ppb = dev->base;
unsigned long start_jiffies = jiffies;
@@ -543,7 +549,7 @@ static int ppa_completion(struct scsi_cmnd *cmd)
if (time_after(jiffies, start_jiffies + 1))
return 0;
- if ((cmd->SCp.this_residual <= 0)) {
+ if (scsi_pointer->this_residual <= 0) {
ppa_fail(dev, DID_ERROR);
return -1; /* ERROR_RETURN */
}
@@ -572,28 +578,30 @@ static int ppa_completion(struct scsi_cmnd *cmd)
}
/* determine if we should use burst I/O */
- fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
- ? PPA_BURST_SIZE : 1;
+ fast = bulk && scsi_pointer->this_residual >= PPA_BURST_SIZE ?
+ PPA_BURST_SIZE : 1;
if (r == (unsigned char) 0xc0)
- status = ppa_out(dev, cmd->SCp.ptr, fast);
+ status = ppa_out(dev, scsi_pointer->ptr, fast);
else
- status = ppa_in(dev, cmd->SCp.ptr, fast);
+ status = ppa_in(dev, scsi_pointer->ptr, fast);
- cmd->SCp.ptr += fast;
- cmd->SCp.this_residual -= fast;
+ scsi_pointer->ptr += fast;
+ scsi_pointer->this_residual -= fast;
if (!status) {
ppa_fail(dev, DID_BUS_BUSY);
return -1; /* ERROR_RETURN */
}
- if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
+ if (scsi_pointer->buffer && !scsi_pointer->this_residual) {
/* if scatter/gather, advance to the next segment */
- if (cmd->SCp.buffers_residual--) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- cmd->SCp.this_residual =
- cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (scsi_pointer->buffers_residual--) {
+ scsi_pointer->buffer =
+ sg_next(scsi_pointer->buffer);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr =
+ sg_virt(scsi_pointer->buffer);
}
}
		/* Now check to see if the drive is ready to communicate */
@@ -658,7 +666,7 @@ static void ppa_interrupt(struct work_struct *work)
}
#endif
- if (cmd->SCp.phase > 1)
+ if (ppa_scsi_pointer(cmd)->phase > 1)
ppa_disconnect(dev);
ppa_pb_dismiss(dev);
@@ -670,6 +678,7 @@ static void ppa_interrupt(struct work_struct *work)
static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = ppa_scsi_pointer(cmd);
unsigned short ppb = dev->base;
unsigned char l = 0, h = 0;
int retv;
@@ -680,7 +689,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (dev->failed)
return 0;
- switch (cmd->SCp.phase) {
+ switch (scsi_pointer->phase) {
case 0: /* Phase 0 - Waiting for parport */
if (time_after(jiffies, dev->jstart + HZ)) {
/*
@@ -715,7 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 1; /* Try again in a jiffy */
}
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
}
fallthrough;
@@ -724,7 +733,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
ppa_fail(dev, DID_NO_CONNECT);
return 0;
}
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 3: /* Phase 3 - Ready to accept a command */
@@ -734,21 +743,22 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (!ppa_send_command(cmd))
return 0;
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->this_residual =
+ scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.this_residual = 0;
- cmd->SCp.ptr = NULL;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->this_residual = 0;
+ scsi_pointer->ptr = NULL;
}
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.phase++;
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->phase++;
fallthrough;
case 5: /* Phase 5 - Data transfer stage */
@@ -761,7 +771,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
if (retv == 0)
return 1;
- cmd->SCp.phase++;
+ scsi_pointer->phase++;
fallthrough;
case 6: /* Phase 6 - Read status/message */
@@ -798,7 +808,7 @@ static int ppa_queuecommand_lck(struct scsi_cmnd *cmd)
dev->jstart = jiffies;
dev->cur_cmd = cmd;
cmd->result = DID_ERROR << 16; /* default return code */
- cmd->SCp.phase = 0; /* bus free */
+ ppa_scsi_pointer(cmd)->phase = 0; /* bus free */
schedule_delayed_work(&dev->ppa_tq, 0);
@@ -839,7 +849,7 @@ static int ppa_abort(struct scsi_cmnd *cmd)
* have tied the SCSI_MESSAGE line high in the interface
*/
- switch (cmd->SCp.phase) {
+ switch (ppa_scsi_pointer(cmd)->phase) {
case 0: /* Do not have access to parport */
case 1: /* Have not connected to interface */
dev->cur_cmd = NULL; /* Forget the problem */
@@ -861,7 +871,7 @@ static int ppa_reset(struct scsi_cmnd *cmd)
{
ppa_struct *dev = ppa_dev(cmd->device->host);
- if (cmd->SCp.phase)
+ if (ppa_scsi_pointer(cmd)->phase)
ppa_disconnect(dev);
dev->cur_cmd = NULL; /* Forget the problem */
@@ -976,6 +986,7 @@ static struct scsi_host_template ppa_template = {
.sg_tablesize = SG_ALL,
.can_queue = 1,
.slave_alloc = ppa_adjust_queue,
+ .cmd_size = sizeof(struct scsi_pointer),
};
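
The ppa conversion follows the midlayer's generic recipe for retiring cmd->SCp: declare per-command private space via .cmd_size and fetch it with scsi_cmd_priv(). A minimal sketch of the same pattern for a hypothetical driver:

struct foo_cmd_priv {			/* hypothetical private state */
	int phase;
};

static struct foo_cmd_priv *foo_priv(struct scsi_cmnd *cmd)
{
	/* Points into extra space the midlayer allocates per command. */
	return scsi_cmd_priv(cmd);
}

static struct scsi_host_template foo_template = {
	/* ... */
	.cmd_size	= sizeof(struct foo_cmd_priv),
};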
/***************************************************************************
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index ca987451b17e..c5c0bbdafc4e 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -91,7 +91,6 @@ enum qedf_ioreq_event {
#define FC_GOOD 0
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
-#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
struct qedf_ioreq {
@@ -189,6 +188,15 @@ struct qedf_ioreq {
unsigned int alloc;
};
+struct qedf_cmd_priv {
+ struct qedf_ioreq *io_req;
+};
+
+static inline struct qedf_cmd_priv *qedf_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
extern struct workqueue_struct *qedf_io_wq;
struct qedf_rport {
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index fdc66d294813..8d8c760eee43 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -131,7 +131,6 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
struct qedf_ctx *qedf = NULL;
long reading;
int ret = 0;
- char msg[40];
if (off != 0)
return ret;
@@ -148,7 +147,6 @@ qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
return ret;
}
- memset(msg, 0, sizeof(msg));
switch (reading) {
case 0:
memset(qedf->grcdump, 0, qedf->grcdump_size);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 99a56ca1fb16..4750ec5789a8 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -804,7 +804,6 @@ static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
struct qedf_io_log *io_log;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
unsigned long flags;
- uint8_t op;
spin_lock_irqsave(&qedf->io_trace_lock, flags);
@@ -813,7 +812,7 @@ static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
io_log->task_id = io_req->xid;
io_log->port_id = fcport->rdata->ids.port_id;
io_log->lun = sc_cmd->device->lun;
- io_log->op = op = sc_cmd->cmnd[0];
+ io_log->op = sc_cmd->cmnd[0];
io_log->lba[0] = sc_cmd->cmnd[2];
io_log->lba[1] = sc_cmd->cmnd[3];
io_log->lba[2] = sc_cmd->cmnd[4];
@@ -857,7 +856,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
	/* Initialize rest of io_req fields */
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
- sc_cmd->SCp.ptr = (char *)io_req;
+ qedf_priv(sc_cmd)->io_req = io_req;
io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
/* Record which cpu this request is associated with */
@@ -894,7 +893,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
return -EINVAL;
}
- /* Record LUN number for later use if we neeed them */
+ /* Record LUN number for later use if we need them */
io_req->lun = (int)sc_cmd->device->lun;
/* Obtain free SQE */
@@ -1065,8 +1064,7 @@ static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
io_req->fcp_resid = fcp_rsp->fcp_resid;
io_req->scsi_comp_flags = rsp_flags;
- CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
- fcp_rsp->scsi_status_code;
+ io_req->cdb_status = fcp_rsp->scsi_status_code;
if (rsp_flags &
FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
@@ -1150,9 +1148,9 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
return;
}
- if (!sc_cmd->SCp.ptr) {
- QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
- "another context.\n");
+ if (!qedf_priv(sc_cmd)->io_req) {
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "io_req is NULL, returned in another context.\n");
return;
}
@@ -1312,7 +1310,7 @@ out:
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
io_req->sc_cmd = NULL;
- sc_cmd->SCp.ptr = NULL;
+ qedf_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
}
@@ -1354,9 +1352,9 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
goto bad_scsi_ptr;
}
- if (!sc_cmd->SCp.ptr) {
- QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
- "another context.\n");
+ if (!qedf_priv(sc_cmd)->io_req) {
+ QEDF_WARN(&(qedf->dbg_ctx),
+ "io_req is NULL, returned in another context.\n");
return;
}
@@ -1409,7 +1407,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
io_req->sc_cmd = NULL;
- sc_cmd->SCp.ptr = NULL;
+ qedf_priv(sc_cmd)->io_req = NULL;
scsi_done(sc_cmd);
kref_put(&io_req->refcount, qedf_release_cmd);
return;
@@ -2250,6 +2248,7 @@ process_els:
io_req->tm_flags == FCP_TMF_TGT_RESET) {
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
io_req->sc_cmd = NULL;
+ kref_put(&io_req->refcount, qedf_release_cmd);
complete(&io_req->tm_done);
}
@@ -2432,8 +2431,8 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
(tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
"LUN RESET");
- if (sc_cmd->SCp.ptr) {
- io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ if (qedf_priv(sc_cmd)->io_req) {
+ io_req = qedf_priv(sc_cmd)->io_req;
ref_cnt = kref_read(&io_req->refcount);
QEDF_ERR(NULL,
"orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 1bf7a22d4948..e045c6e25090 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -740,7 +740,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
}
- io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ io_req = qedf_priv(sc_cmd)->io_req;
if (!io_req) {
QEDF_ERR(&qedf->dbg_ctx,
"sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n",
@@ -873,7 +873,7 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
bool qedf_wait_for_upload(struct qedf_ctx *qedf)
{
- struct qedf_rport *fcport = NULL;
+ struct qedf_rport *fcport;
int wait_cnt = 120;
while (wait_cnt--) {
@@ -888,7 +888,7 @@ bool qedf_wait_for_upload(struct qedf_ctx *qedf)
rcu_read_lock();
list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
- if (fcport && test_bit(QEDF_RPORT_SESSION_READY,
+ if (test_bit(QEDF_RPORT_SESSION_READY,
&fcport->flags)) {
if (fcport->rdata)
QEDF_ERR(&qedf->dbg_ctx,
@@ -899,9 +899,9 @@ bool qedf_wait_for_upload(struct qedf_ctx *qedf)
"Waiting for fcport %p.\n", fcport);
}
}
+
rcu_read_unlock();
return false;
-
}
/* Performs soft reset of qedf_ctx by simulating a link down/up */
@@ -911,7 +911,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
struct qed_link_output if_link;
if (lport->vport) {
- QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+ printk_ratelimited("Cannot issue host reset on NPIV port.\n");
return;
}
@@ -996,6 +996,7 @@ static struct scsi_host_template qedf_host_template = {
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
.change_queue_depth = scsi_change_queue_depth,
+ .cmd_size = sizeof(struct qedf_cmd_priv),
};
static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
@@ -1066,7 +1067,6 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
u32 crc;
unsigned int hlen, tlen, elen;
int wlen;
- struct fc_stats *stats;
struct fc_lport *tmp_lport;
struct fc_lport *vn_port = NULL;
struct qedf_rport *fcport;
@@ -1214,10 +1214,8 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
hp->fcoe_sof = sof;
/*update tx stats */
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->TxFrames++;
- stats->TxWords += wlen;
- put_cpu();
+ this_cpu_inc(lport->stats->TxFrames);
+ this_cpu_add(lport->stats->TxWords, wlen);
/* Get VLAN ID from skb for printing purposes */
__vlan_hwaccel_get_tag(skb, &vlan_tci);
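
The stats hunk above trades an explicit get_cpu()/put_cpu() window for the this_cpu_*() accessors, each of which is independently preemption-safe; side by side (fields as in struct fc_stats):

/* Before: pin the CPU for the duration of both updates. */
stats = per_cpu_ptr(lport->stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
put_cpu();

/* After: no pinning needed; each update is preemption-safe on its own. */
this_cpu_inc(lport->stats->TxFrames);
this_cpu_add(lport->stats->TxWords, wlen);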
@@ -1415,6 +1413,8 @@ static void qedf_upload_connection(struct qedf_ctx *qedf,
*/
term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
&term_params_dma, GFP_KERNEL);
+ if (!term_params)
+ return;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
"port_id=%06x.\n", fcport->rdata->ids.port_id);
@@ -1862,6 +1862,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
init_completion(&vport_qedf->flogi_compl);
INIT_LIST_HEAD(&vport_qedf->fcports);
+ INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
rc = qedf_vport_libfc_config(vport, vn_port);
if (rc) {
@@ -1920,6 +1921,27 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
fc_vport_setlink(vn_port);
}
+ /* Set symbolic node name */
+ if (base_qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (base_qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(vn_port->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
+
+ /* Set supported speed */
+ fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds;
+
+ /* Set speed */
+ vn_port->link_speed = n_port->link_speed;
+
+ /* Set port type */
+ fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV;
+
+ /* Set maxframe size */
+ fc_host_maxframe_size(vn_port->host) = n_port->mfs;
+
QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
vn_port);
@@ -3685,11 +3707,6 @@ err2:
err1:
scsi_host_put(lport->host);
err0:
- if (qedf) {
- QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
-
- clear_bit(QEDF_PROBING, &qedf->flags);
- }
return rc;
}
@@ -3978,7 +3995,9 @@ void qedf_stag_change_work(struct work_struct *work)
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
- QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+ printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+ dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ qedf->dbg_ctx.host_no);
qedf_ctx_soft_reset(qedf->lport);
}
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 5916ed7662d5..6901738324da 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -603,9 +603,9 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi,
goto error;
}
- if (!sc_cmd->SCp.ptr) {
+ if (!iscsi_cmd(sc_cmd)->task) {
QEDI_WARN(&qedi->dbg_ctx,
- "SCp.ptr is NULL, returned in another context.\n");
+ "NULL task pointer, returned in another context.\n");
goto error;
}
@@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->list_tmf_work = NULL;
}
}
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
- if (!found) {
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ if (!found)
goto check_cleanup_reqs;
- }
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
@@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->state = CLEANUP_RECV;
unlock:
spin_unlock_bh(&conn->session->back_lock);
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
wake_up_interruptible(&qedi_conn->wait_queue);
return;
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 282ecb4e39bb..31ec429104e2 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -59,6 +59,7 @@ struct scsi_host_template qedi_host_template = {
.dma_boundary = QEDI_HW_DMA_BOUNDARY,
.cmd_per_lun = 128,
.shost_groups = qedi_shost_groups,
+ .cmd_size = sizeof(struct iscsi_cmd),
};
static void qedi_conn_free_login_resources(struct qedi_ctx *qedi,
@@ -859,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
return qedi_iscsi_send_ioreq(task);
}
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 5 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
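
Hoisting qedi_offload_work() lets qedi_ep_connect() run INIT_WORK() as soon as the endpoint exists, which in turn makes the unconditional flush_work() added to qedi_ep_disconnect() safe even for endpoints that never reached qedi_set_path(). The ordering in outline (calls as used in this patch):

/* ep_connect(): the work item is valid before any state transition. */
INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);

/* ep_disconnect(): flush unconditionally; the EP_STATE_OFLDCONN_NONE
 * guard removed below is no longer required. */
flush_work(&qedi_ep->offload_work);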
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -907,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
}
qedi_ep = ep->dd_data;
memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
qedi_ep->state = EP_STATE_IDLE;
qedi_ep->iscsi_cid = (u32)-1;
qedi_ep->qedi = qedi;
@@ -1055,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
+ flush_work(&qedi_ep->offload_work);
+
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
- flush_work(&qedi_ep->offload_work);
-
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
abrt_conn = qedi_conn->abrt_conn;
@@ -1234,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
return rc;
}
-static void qedi_offload_work(struct work_struct *work)
-{
- struct qedi_endpoint *qedi_ep =
- container_of(work, struct qedi_endpoint, offload_work);
- struct qedi_ctx *qedi;
- int wait_delay = 5 * HZ;
- int ret;
-
- qedi = qedi_ep->qedi;
-
- ret = qedi_iscsi_offload_conn(qedi_ep);
- if (ret) {
- QEDI_ERR(&qedi->dbg_ctx,
- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
- qedi_ep->iscsi_cid, qedi_ep, ret);
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- return;
- }
-
- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
- (qedi_ep->state ==
- EP_STATE_OFLDCONN_COMPL),
- wait_delay);
- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- QEDI_ERR(&qedi->dbg_ctx,
- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
- qedi_ep->iscsi_cid, qedi_ep);
- }
-}
-
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
struct qedi_ctx *qedi;
@@ -1380,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
qedi_ep->dst_addr, qedi_ep->dst_port);
}
- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
queue_work(qedi->offload_thread, &qedi_ep->offload_work);
ret = 0;
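These hunks move INIT_WORK() from qedi_set_path() into qedi_ep_connect(), so the work item is initialized as soon as the endpoint exists; qedi_ep_disconnect() can then call flush_work() unconditionally instead of inferring from the endpoint state whether the work was ever set up. A minimal sketch of the pattern, with hypothetical names:

#include <linux/workqueue.h>

struct demo_ep {
	struct work_struct offload_work;
};

static void demo_offload_fn(struct work_struct *work)
{
	/* ... offload the connection ... */
}

static void demo_ep_create(struct demo_ep *ep)
{
	/* initialize at creation time, not just before queuing */
	INIT_WORK(&ep->offload_work, demo_offload_fn);
}

static void demo_ep_disconnect(struct demo_ep *ep)
{
	/* safe even if the work was never queued */
	flush_work(&ep->offload_work);
}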
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 1dec814d8788..df2fe7bd26d1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -618,7 +618,7 @@ static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
sizeof(struct qedi_endpoint *)), GFP_KERNEL);
if (!qedi->ep_tbl)
return -ENOMEM;
- port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
+ port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE);
if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
QEDI_LOCAL_PORT_MIN, port_id)) {
qedi_cm_free_mem(qedi);
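prandom_u32_max(range) returns a value in [0, range) directly, replacing the open-coded modulo above. A sketch of the substitution:

#include <linux/prandom.h>

static u32 demo_pick_local_port(u32 range)
{
	/* old form: prandom_u32() % range */
	return prandom_u32_max(range);
}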
@@ -1538,7 +1538,6 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
int i;
struct scsi_bd *pbl;
u64 *list;
- dma_addr_t page;
/* Alloc dma memory for BDQ buffers */
for (i = 0; i < QEDI_BDQ_NUM; i++) {
@@ -1608,11 +1607,9 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
QEDI_PAGE_SIZE;
list = (u64 *)qedi->bdq_pbl_list;
- page = qedi->bdq_pbl_list_dma;
for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
*list = qedi->bdq_pbl_dma;
list++;
- page += QEDI_PAGE_SIZE;
}
return 0;
@@ -2089,8 +2086,7 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
rc = snprintf(buf, ip_len, fmt, gw);
break;
case ISCSI_BOOT_ETH_FLAGS:
- rc = snprintf(buf, 3, "%hhd\n",
- SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = snprintf(buf, 3, "0\n");
@@ -2257,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = snprintf(buf, 3, "0\n");
@@ -2418,17 +2414,18 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
int rval;
u16 retry = 10;
- if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
- iscsi_host_remove(qedi->shost);
+ if (mode == QEDI_MODE_NORMAL)
+ iscsi_host_remove(qedi->shost, false);
+ else if (mode == QEDI_MODE_SHUTDOWN)
+ iscsi_host_remove(qedi->shost, true);
+ if (mode == QEDI_MODE_NORMAL || mode == QEDI_MODE_SHUTDOWN) {
if (qedi->tmf_thread) {
- flush_workqueue(qedi->tmf_thread);
destroy_workqueue(qedi->tmf_thread);
qedi->tmf_thread = NULL;
}
if (qedi->offload_thread) {
- flush_workqueue(qedi->offload_thread);
destroy_workqueue(qedi->offload_thread);
qedi->offload_thread = NULL;
}
@@ -2497,7 +2494,7 @@ static void qedi_board_disable_work(struct work_struct *work)
if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
return;
- __qedi_remove(qedi->pdev, QEDI_MODE_SHUTDOWN);
+ __qedi_remove(qedi->pdev, QEDI_MODE_NORMAL);
}
static void qedi_shutdown(struct pci_dev *pdev)
@@ -2797,7 +2794,7 @@ remove_host:
#ifdef CONFIG_DEBUG_FS
qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
- iscsi_host_remove(qedi->shost);
+ iscsi_host_remove(qedi->shost, false);
stop_iscsi_func:
qedi_ops->stop(qedi->cdev);
stop_slowpath:
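iscsi_host_remove() gained a second argument indicating whether the removal happens on the shutdown path, which is why the single call is split above; destroy_workqueue() already drains the queue, so the dropped flush_workqueue() calls were redundant. A hedged sketch of the split, with a hypothetical mode value:

#include <scsi/scsi_host.h>
#include <scsi/libiscsi.h>

static void demo_remove(struct Scsi_Host *shost, int mode, int mode_shutdown)
{
	/* 'true' tells libiscsi this is a shutdown-time removal */
	iscsi_host_remove(shost, mode == mode_shutdown);
}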
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 1dc56f4c89d8..1e7f4d138e06 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -477,13 +477,6 @@ __setup("qla1280=", qla1280_setup);
#endif
-/*
- * We use the scsi_pointer structure that's included with each scsi_command
- * to overlay our struct srb over it. qla1280_init() checks that a srb is not
- * bigger than a scsi_pointer.
- */
-
-#define CMD_SP(Cmnd) &Cmnd->SCp
#define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
#define CMD_CDBP(Cmnd) Cmnd->cmnd
#define CMD_SNSP(Cmnd) Cmnd->sense_buffer
@@ -693,7 +686,7 @@ static int qla1280_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
- struct srb *sp = (struct srb *)CMD_SP(cmd);
+ struct srb *sp = scsi_cmd_priv(cmd);
int status;
sp->cmd = cmd;
@@ -828,7 +821,7 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
ENTER("qla1280_error_action");
ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
- sp = (struct srb *)CMD_SP(cmd);
+ sp = scsi_cmd_priv(cmd);
bus = SCSI_BUS_32(cmd);
target = SCSI_TCN_32(cmd);
lun = SCSI_LUN_32(cmd);
@@ -3959,7 +3952,7 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
int i;
ha = (struct scsi_qla_host *)host->hostdata;
- sp = (struct srb *)CMD_SP(cmd);
+ sp = scsi_cmd_priv(cmd);
printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
@@ -3979,7 +3972,6 @@ __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
} */
printk(" tag=%d, transfersize=0x%x \n",
scsi_cmd_to_rq(cmd)->tag, cmd->transfersize);
- printk(" SP=0x%p\n", CMD_SP(cmd));
printk(" underflow size = 0x%x, direction=0x%x\n",
cmd->underflow, cmd->sc_data_direction);
}
@@ -4045,7 +4037,6 @@ qla1280_setup(char *s)
{
char *cp, *ptr;
unsigned long val;
- int toke;
cp = s;
@@ -4060,7 +4051,7 @@ qla1280_setup(char *s)
} else
val = simple_strtoul(ptr, &ptr, 0);
- switch ((toke = qla1280_get_token(cp))) {
+ switch (qla1280_get_token(cp)) {
case TOKEN_NVRAM:
if (!val)
driver_setup.no_nvram = 1;
@@ -4139,6 +4130,7 @@ static struct scsi_host_template qla1280_driver_template = {
.can_queue = MAX_OUTSTANDING_COMMANDS,
.this_id = -1,
.sg_tablesize = SG_ALL,
+ .cmd_size = sizeof(struct srb),
};
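With .cmd_size set, the SCSI midlayer allocates sizeof(struct srb) of driver-private space behind every struct scsi_cmnd, and scsi_cmd_priv() returns that area; this is what lets the driver drop the old overlay on cmd->SCp and the runtime size check in qla1280_init(). A minimal sketch of the pairing, with hypothetical names:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct demo_priv {
	int state;			/* per-command driver data */
};

static struct scsi_host_template demo_template = {
	/* midlayer reserves this much extra space per command */
	.cmd_size = sizeof(struct demo_priv),
};

static int demo_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct demo_priv *priv = scsi_cmd_priv(cmd);

	priv->state = 1;		/* valid for the life of the command */
	return 0;
}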
@@ -4351,12 +4343,6 @@ static struct pci_driver qla1280_pci_driver = {
static int __init
qla1280_init(void)
{
- if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
- printk(KERN_WARNING
- "qla1280: struct srb too big, aborting\n");
- return -EINVAL;
- }
-
#ifdef MODULE
/*
* If we are called as a module, the qla1280 pointer may not be null
diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h
index e7820b5bca38..d309e2ca14de 100644
--- a/drivers/scsi/qla1280.h
+++ b/drivers/scsi/qla1280.h
@@ -87,8 +87,7 @@
#define RESPONSE_ENTRY_CNT 63 /* Number of response entries. */
/*
- * SCSI Request Block structure (sp) that is placed
- * on cmd->SCp location of every I/O
+ * SCSI Request Block structure (sp) that occurs after each struct scsi_cmnd.
*/
struct srb {
struct list_head list; /* (8/16) LU queue */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 032efb294ee5..b67ad30d56e6 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -555,7 +555,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EINVAL;
- if (IS_NOCACHE_VPD_TYPE(ha))
+ if (!IS_NOCACHE_VPD_TYPE(ha))
goto skip;
faddr = ha->flt_region_vpd << 2;
@@ -745,7 +745,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (IS_QLA83XX(ha)) {
uint32_t idc_control;
qla83xx_idc_lock(vha, 0);
@@ -951,9 +951,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
return 0;
+ mutex_lock(&vha->hw->optrom_mutex);
if (ha->dcbx_tlv)
goto do_read;
- mutex_lock(&vha->hw->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
mutex_unlock(&vha->hw->optrom_mutex);
return 0;
@@ -1056,9 +1056,6 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
continue;
if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
continue;
- if (iter->type == 0x27 &&
- (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
- continue;
sysfs_remove_bin_file(&host->shost_gendev.kobj,
iter->attr);
@@ -2479,7 +2476,6 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
qla2x00_port_speed_store);
static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
-static DEVICE_ATTR_RO(edif_doorbell);
static struct attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version.attr,
@@ -2524,7 +2520,6 @@ static struct attribute *qla2x00_host_attrs[] = {
&dev_attr_port_no.attr,
&dev_attr_fw_attr.attr,
&dev_attr_dport_diagnostics.attr,
- &dev_attr_edif_doorbell.attr,
&dev_attr_mpi_pause.attr,
&dev_attr_qlini_mode.attr,
&dev_attr_ql2xiniexchg.attr,
@@ -2700,7 +2695,13 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
static inline void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
rport->dev_loss_tmo = timeout ? timeout : 1;
+
+ if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port)
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ rport->dev_loss_tmo);
}
static void
@@ -2713,17 +2714,27 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
- /* Now that the rport has been deleted, set the fcport state to
- FCS_DEVICE_DEAD */
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
+ ql_dbg(ql_dbg_async, fcport->vha, 0x5101,
+ DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d",
+ rport->port_state));
+
+ /*
+ * Now that the rport has been deleted, set the fcport state to
+ * FCS_DEVICE_DEAD, if the fcport is still lost.
+ */
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
/*
* Transport has effectively 'deleted' the rport, clear
* all local references.
*/
spin_lock_irqsave(host->host_lock, flags);
- fcport->rport = fcport->drport = NULL;
- *((fc_port_t **)rport->dd_data) = NULL;
+ /* Confirm port has not reappeared before clearing pointers. */
+ if (rport->port_state != FC_PORTSTATE_ONLINE) {
+ fcport->rport = fcport->drport = NULL;
+ *((fc_port_t **)rport->dd_data) = NULL;
+ }
spin_unlock_irqrestore(host->host_lock, flags);
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
@@ -2756,9 +2767,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
/*
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
+ *
+ * Attempt to cleanup only lost devices.
*/
if (fcport->loop_id != FC_NO_LOOP_ID) {
- if (IS_FWI2_CAPABLE(fcport->vha->hw)) {
+ if (IS_FWI2_CAPABLE(fcport->vha->hw) &&
+ fcport->scan_state != QLA_FCPORT_FOUND) {
if (fcport->loop_id != FC_NO_LOOP_ID)
fcport->logout_on_delete = 1;
@@ -2768,7 +2782,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
__LINE__);
qlt_schedule_sess_for_deletion(fcport);
}
- } else {
+ } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) {
qla2x00_port_logout(fcport->vha, fcport);
}
}
@@ -3316,11 +3330,34 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.bsg_timeout = qla24xx_bsg_timeout,
};
+static uint
+qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds)
+{
+ uint supported_speeds = FC_PORTSPEED_UNKNOWN;
+
+ if (speeds & FDMI_PORT_SPEED_64GB)
+ supported_speeds |= FC_PORTSPEED_64GBIT;
+ if (speeds & FDMI_PORT_SPEED_32GB)
+ supported_speeds |= FC_PORTSPEED_32GBIT;
+ if (speeds & FDMI_PORT_SPEED_16GB)
+ supported_speeds |= FC_PORTSPEED_16GBIT;
+ if (speeds & FDMI_PORT_SPEED_8GB)
+ supported_speeds |= FC_PORTSPEED_8GBIT;
+ if (speeds & FDMI_PORT_SPEED_4GB)
+ supported_speeds |= FC_PORTSPEED_4GBIT;
+ if (speeds & FDMI_PORT_SPEED_2GB)
+ supported_speeds |= FC_PORTSPEED_2GBIT;
+ if (speeds & FDMI_PORT_SPEED_1GB)
+ supported_speeds |= FC_PORTSPEED_1GBIT;
+
+ return supported_speeds;
+}
+
void
qla2x00_init_host_attr(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- u32 speeds = FC_PORTSPEED_UNKNOWN;
+ u32 speeds = 0, fdmi_speed = 0;
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
@@ -3330,7 +3367,8 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- speeds = qla25xx_fdmi_port_speed_capability(ha);
+ fdmi_speed = qla25xx_fdmi_port_speed_capability(ha);
+ speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed);
fc_host_supported_speeds(vha->host) = speeds;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 9da8034ccad4..cd75b179410d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -29,7 +29,8 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
"%s: sp hdl %x, result=%x bsg ptr %p\n",
__func__, sp->handle, res, bsg_job);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
@@ -2424,6 +2425,89 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
}
static int
+qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+ scsi_qla_host_t *vha = shost_priv(host);
+ int rval;
+ struct qla_dport_diag_v2 *dd;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ uint16_t options;
+
+ if (!IS_DPORT_CAPABLE(vha->hw))
+ return -EPERM;
+
+ dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+ if (!dd)
+ return -ENOMEM;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
+
+ options = dd->options;
+
+ /* Check dport Test in progress */
+ if (options == QLA_GET_DPORT_RESULT_V2 &&
+ vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_IN_PROCESS;
+ goto dportcomplete;
+ }
+
+ /* Check chip reset in progress and start/restart requests arrive */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2)) {
+ vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ }
+
+ /* Check chip reset in progress and get result request arrive */
+ if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
+ options == QLA_GET_DPORT_RESULT_V2) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
+ goto dportcomplete;
+ }
+
+ rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);
+
+ if (rval == QLA_SUCCESS) {
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+ if (options == QLA_START_DPORT_TEST_V2 ||
+ options == QLA_RESTART_DPORT_TEST_V2) {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
+ } else if (options == QLA_GET_DPORT_RESULT_V2) {
+ dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
+ dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
+ }
+ } else {
+ dd->mbx1 = mcp->mb[0];
+ dd->mbx2 = mcp->mb[1];
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_DPORT_DIAG_ERR;
+ }
+
+dportcomplete:
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
+
+ bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+ bsg_job->reply_len = sizeof(*bsg_reply);
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ kfree(dd);
+
+ return 0;
+}
+
+static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
@@ -2435,19 +2519,23 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
qla27xx_get_active_image(vha, &active_regions);
regions.global_image = active_regions.global;
+ if (IS_QLA27XX(ha))
+ regions.nvme_params = QLA27XX_PRIMARY_IMAGE;
+
if (IS_QLA28XX(ha)) {
qla28xx_get_aux_images(vha, &active_regions);
regions.board_config = active_regions.aux.board_config;
regions.vpd_nvram = active_regions.aux.vpd_nvram;
regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
+ regions.nvme_params = active_regions.aux.nvme_params;
}
ql_dbg(ql_dbg_user, vha, 0x70e1,
- "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
+ "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
__func__, vha->host_no, regions.global_image,
regions.board_config, regions.vpd_nvram,
- regions.npiv_config_0_1, regions.npiv_config_2_3);
+ regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
@@ -2859,6 +2947,9 @@ qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_j
case QL_VND_DPORT_DIAGNOSTICS:
return qla2x00_do_dport_diagnostics(bsg_job);
+ case QL_VND_DPORT_DIAGNOSTICS_V2:
+ return qla2x00_do_dport_diagnostics_v2(bsg_job);
+
case QL_VND_EDIF_MGMT:
return qla_edif_app_mgmt(bsg_job);
@@ -2974,6 +3065,13 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
__func__, bsg_job);
+
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, vha, 0x9007,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
+
/* find the bsg job from the active list of commands */
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -2991,7 +3089,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
sp->u.bsg_job == bsg_job) {
req->outstanding_cmds[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
+
+ if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command failed.\n");
bsg_reply->result = -EIO;
@@ -3013,7 +3112,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return 0;
}
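Throughout this diff, sp->free(sp) is replaced by kref_put(&sp->cmd_kref, qla2x00_sp_release); the "/* ref: INIT */" markers tie each put to the reference taken at allocation, so an srb is released only once every holder has dropped its count. A generic sketch of the scheme, with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_sp {
	struct kref cmd_kref;
};

static void demo_sp_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_sp, cmd_kref));
}

static struct demo_sp *demo_sp_alloc(void)
{
	struct demo_sp *sp = kzalloc(sizeof(*sp), GFP_KERNEL);

	if (sp)
		kref_init(&sp->cmd_kref);	/* ref: INIT */
	return sp;
}

static void demo_sp_done(struct demo_sp *sp)
{
	kref_put(&sp->cmd_kref, demo_sp_release);	/* drop ref: INIT */
}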
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 0f8a4c7e52a2..d38dab0a07e8 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -37,6 +37,7 @@
#define QL_VND_GET_TGT_STATS 0x25
#define QL_VND_MANAGE_HOST_PORT 0x26
#define QL_VND_MBX_PASSTHRU 0x2B
+#define QL_VND_DPORT_DIAGNOSTICS_V2 0x2C
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -60,6 +61,9 @@
#define EXT_STATUS_TIMEOUT 30
#define EXT_STATUS_THREAD_FAILED 31
#define EXT_STATUS_DATA_CMP_FAILED 32
+#define EXT_STATUS_DPORT_DIAG_ERR 40
+#define EXT_STATUS_DPORT_DIAG_IN_PROCESS 41
+#define EXT_STATUS_DPORT_DIAG_NOT_RUNNING 42
/* BSG definations for interpreting CommandSent field */
#define INT_DEF_LB_LOOPBACK_CMD 0
@@ -157,7 +161,7 @@ struct qla84_msg_mgmt {
uint16_t rsrvd;
struct qla84_mgmt_param mgmtp;/* parameters for cmd */
uint32_t len; /* bytes in payload following this struct */
- uint8_t payload[0]; /* payload for cmd */
+ uint8_t payload[]; /* payload for cmd */
};
struct qla_bsg_a84_mgmt {
@@ -216,7 +220,7 @@ struct qla_image_version {
struct qla_image_version_list {
uint32_t count;
- struct qla_image_version version[0];
+ struct qla_image_version version[];
} __packed;
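The [0]-sized trailing arrays become C99 flexible array members; besides letting the compiler catch misuse, this pairs naturally with struct_size() for overflow-safe allocations. A sketch under those assumptions, with hypothetical names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_list {
	u32 count;
	u32 entry[];			/* flexible array member */
};

static struct demo_list *demo_alloc(u32 n)
{
	/* header plus n trailing entries, with overflow checking */
	struct demo_list *l = kzalloc(struct_size(l, entry, n), GFP_KERNEL);

	if (l)
		l->count = n;
	return l;
}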
struct qla_status_reg {
@@ -288,6 +292,17 @@ struct qla_dport_diag {
uint8_t unused[62];
} __packed;
+#define QLA_GET_DPORT_RESULT_V2 0 /* Get Result */
+#define QLA_RESTART_DPORT_TEST_V2 1 /* Restart test */
+#define QLA_START_DPORT_TEST_V2 2 /* Start test */
+struct qla_dport_diag_v2 {
+ uint16_t options;
+ uint16_t mbx1;
+ uint16_t mbx2;
+ uint8_t unused[58];
+ uint8_t buf[1024]; /* Test Result */
+} __packed;
+
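The v2 interface is stateful: an application starts or restarts the test, then polls for the result while the driver reports EXT_STATUS_DPORT_DIAG_IN_PROCESS via dport_status. A hypothetical caller-side sketch (not driver code):

static void demo_dport_request(struct qla_dport_diag_v2 *dd, bool start)
{
	dd->options = start ? QLA_START_DPORT_TEST_V2
			    : QLA_GET_DPORT_RESULT_V2;
	/* sent via the QL_VND_DPORT_DIAGNOSTICS_V2 bsg vendor command */
}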
/* D_Port options */
#define QLA_DPORT_RESULT 0x0
#define QLA_DPORT_START 0x2
@@ -299,7 +314,8 @@ struct qla_active_regions {
uint8_t vpd_nvram;
uint8_t npiv_config_0_1;
uint8_t npiv_config_2_3;
- uint8_t reserved[32];
+ uint8_t nvme_params;
+ uint8_t reserved[31];
} __packed;
#include "qla_edif_bsg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 7cf1f78cbaee..d7e8454304ce 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2455,7 +2455,7 @@ qla83xx_fw_dump_failed_0:
/****************************************************************************/
/* Write the debug message prefix into @pbuf. */
-static void ql_dbg_prefix(char *pbuf, int pbuf_size,
+static void ql_dbg_prefix(char *pbuf, int pbuf_size, struct pci_dev *pdev,
const scsi_qla_host_t *vha, uint msg_id)
{
if (vha) {
@@ -2464,6 +2464,9 @@ static void ql_dbg_prefix(char *pbuf, int pbuf_size,
/* <module-name> [<dev-name>]-<msg-id>:<host>: */
snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
dev_name(&(pdev->dev)), msg_id, vha->host_no);
+ } else if (pdev) {
+ snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
+ dev_name(&pdev->dev), msg_id);
} else {
/* <module-name> [<dev-name>]-<msg-id>: : */
snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
@@ -2491,20 +2494,20 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
struct va_format vaf;
char pbuf[64];
- if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+ ql_ktrace(1, level, pbuf, NULL, vha, id, fmt);
+
+ if (!ql_mask_match(level))
return;
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
+
va_start(va, fmt);
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
-
- if (!ql_mask_match(level))
- trace_ql_dbg_log(pbuf, &vaf);
- else
- pr_warn("%s%pV", pbuf, &vaf);
+ pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
@@ -2533,6 +2536,9 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
if (pdev == NULL)
return;
+
+ ql_ktrace(1, level, pbuf, pdev, NULL, id, fmt);
+
if (!ql_mask_match(level))
return;
@@ -2541,7 +2547,9 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset);
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL,
+ id + ql_dbg_offset);
pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
@@ -2570,7 +2578,10 @@ ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
+ ql_ktrace(0, level, pbuf, NULL, vha, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
va_start(va, fmt);
@@ -2621,7 +2632,10 @@ ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id);
+ ql_ktrace(0, level, pbuf, pdev, NULL, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id);
va_start(va, fmt);
@@ -2716,7 +2730,11 @@ ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
if (level > ql_errlev)
return;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, id);
+ ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
+
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
+ qpair ? qpair->vha : NULL, id);
va_start(va, fmt);
@@ -2762,6 +2780,8 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
struct va_format vaf;
char pbuf[128];
+ ql_ktrace(1, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
+
if (!ql_mask_match(level))
return;
@@ -2770,8 +2790,10 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
vaf.fmt = fmt;
vaf.va = &va;
- ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL,
- id + ql_dbg_offset);
+ if (!pbuf[0]) /* set by ql_ktrace */
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
+ qpair ? qpair->vha : NULL, id + ql_dbg_offset);
+
pr_warn("%s%pV", pbuf, &vaf);
va_end(va);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f1f6c740bdcd..70482b55d240 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -383,5 +383,48 @@ ql_mask_match(uint level)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
- return (level & ql2xextended_error_logging) == level;
+ return level && ((level & ql2xextended_error_logging) == level);
}
+
+static inline int
+ql_mask_match_ext(uint level, int *log_tunable)
+{
+ if (*log_tunable == 1)
+ *log_tunable = QL_DBG_DEFAULT1_MASK;
+
+ return (level & *log_tunable) == level;
+}
+
+/* Assumes a local pbuf[] buffer is present at the call site. */
+#define ql_ktrace(dbg_msg, level, pbuf, pdev, vha, id, fmt) do { \
+ struct va_format _vaf; \
+ va_list _va; \
+ u32 dbg_off = dbg_msg ? ql_dbg_offset : 0; \
+ \
+ pbuf[0] = 0; \
+ if (!trace_ql_dbg_log_enabled()) \
+ break; \
+ \
+ if (dbg_msg && !ql_mask_match_ext(level, \
+ &ql2xextended_error_logging_ktrace)) \
+ break; \
+ \
+ ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, vha, id + dbg_off); \
+ \
+ va_start(_va, fmt); \
+ _vaf.fmt = fmt; \
+ _vaf.va = &_va; \
+ \
+ trace_ql_dbg_log(pbuf, &_vaf); \
+ \
+ va_end(_va); \
+} while (0)
+
+#define QLA_ENABLE_KERNEL_TRACING
+
+#ifdef QLA_ENABLE_KERNEL_TRACING
+#define QLA_TRACE_ENABLE(_tr) \
+ trace_array_set_clr_event(_tr, "qla", NULL, true)
+#else /* QLA_ENABLE_KERNEL_TRACING */
+#define QLA_TRACE_ENABLE(_tr)
+#endif /* QLA_ENABLE_KERNEL_TRACING */
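ql_ktrace() always clears pbuf[0] and fills the buffer (and emits the trace event) only when tracing is enabled and the level mask matches; the "if (!pbuf[0])" checks in qla_dbg.c above then rebuild the prefix only when the trace path did not already do so. A small sketch of that sentinel pattern, with hypothetical helpers:

#include <linux/kernel.h>

static void demo_ktrace(char *pbuf, size_t n)
{
	pbuf[0] = 0;		/* stays empty unless the trace path runs */
	/* if tracing is enabled: fill pbuf and emit the trace event */
}

static void demo_log(void)
{
	char pbuf[64];

	demo_ktrace(pbuf, sizeof(pbuf));
	if (!pbuf[0])		/* prefix not built by the trace path */
		snprintf(pbuf, sizeof(pbuf), "demo [dev]-0000: ");
	/* ... pr_warn("%s...", pbuf, ...) ... */
}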
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9ebf4a234d9a..802eec6407d9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,11 @@
#include <uapi/scsi/fc/fc_els.h>
+#define QLA_DFS_DEFINE_DENTRY(_debugfs_file_name) \
+ struct dentry *dfs_##_debugfs_file_name
+#define QLA_DFS_ROOT_DEFINE_DENTRY(_debugfs_file_name) \
+ struct dentry *qla_dfs_##_debugfs_file_name
+
/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */
typedef struct {
uint8_t domain;
@@ -78,7 +83,7 @@ typedef union {
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
-#define QLA2XXX_MANUFACTURER "QLogic Corporation"
+#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -726,6 +731,11 @@ typedef struct srb {
* code.
*/
void (*put_fn)(struct kref *kref);
+
+ /*
+ * Report completion for asynchronous commands.
+ */
+ void (*async_done)(struct srb *sp, int res);
} srb_t;
#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
@@ -1168,6 +1178,12 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs)
/* ISP mailbox loopback echo diagnostic error code */
#define MBS_LB_RESET 0x17
+
+/* AEN mailbox Port Diagnostics test */
+#define AEN_START_DIAG_TEST 0x0 /* start the diagnostics */
+#define AEN_DONE_DIAG_TEST_WITH_NOERR 0x1 /* Done with no errors */
+#define AEN_DONE_DIAG_TEST_WITH_ERR 0x2 /* Done with error. */
+
/*
* Firmware options 1, 2, 3.
*/
@@ -2153,6 +2169,11 @@ typedef struct {
#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
failure */
#define CS_REJECT_RECEIVED 0x4E /* Reject received */
+#define CS_EDIF_AUTH_ERROR 0x63 /* decrypt error */
+#define CS_EDIF_PAD_LEN_ERROR 0x65 /* pad > frame size, not 4byte align */
+#define CS_EDIF_INV_REQ 0x66 /* invalid request */
+#define CS_EDIF_SPI_ERROR 0x67 /* rx frame unable to locate sa */
+#define CS_EDIF_HDR_ERROR 0x69 /* data frame != expected len */
#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
#define CS_UNKNOWN 0x81 /* Driver defined */
#define CS_RETRY 0x82 /* Driver defined */
@@ -2621,7 +2642,6 @@ typedef struct fc_port {
struct {
uint32_t enable:1; /* device is edif enabled/req'd */
uint32_t app_stop:2;
- uint32_t app_started:1;
uint32_t aes_gmac:1;
uint32_t app_sess_online:1;
uint32_t tx_sa_set:1;
@@ -2632,6 +2652,7 @@ typedef struct fc_port {
uint32_t rx_rekey_cnt;
uint64_t tx_bytes;
uint64_t rx_bytes;
+ uint8_t sess_down_acked;
uint8_t auth_state;
uint16_t authok:1;
uint16_t rekey_cnt;
@@ -2666,25 +2687,28 @@ struct event_arg {
/*
* Fibre channel port/lun states.
*/
-#define FCS_UNCONFIGURED 1
-#define FCS_DEVICE_DEAD 2
-#define FCS_DEVICE_LOST 3
-#define FCS_ONLINE 4
+enum {
+ FCS_UNKNOWN,
+ FCS_UNCONFIGURED,
+ FCS_DEVICE_DEAD,
+ FCS_DEVICE_LOST,
+ FCS_ONLINE,
+};
extern const char *const port_state_str[5];
-static const char * const port_dstate_str[] = {
- "DELETED",
- "GNN_ID",
- "GNL",
- "LOGIN_PEND",
- "LOGIN_FAILED",
- "GPDB",
- "UPD_FCPORT",
- "LOGIN_COMPLETE",
- "ADISC",
- "DELETE_PEND",
- "LOGIN_AUTH_PEND",
+static const char *const port_dstate_str[] = {
+ [DSC_DELETED] = "DELETED",
+ [DSC_GNN_ID] = "GNN_ID",
+ [DSC_GNL] = "GNL",
+ [DSC_LOGIN_PEND] = "LOGIN_PEND",
+ [DSC_LOGIN_FAILED] = "LOGIN_FAILED",
+ [DSC_GPDB] = "GPDB",
+ [DSC_UPD_FCPORT] = "UPD_FCPORT",
+ [DSC_LOGIN_COMPLETE] = "LOGIN_COMPLETE",
+ [DSC_ADISC] = "ADISC",
+ [DSC_DELETE_PEND] = "DELETE_PEND",
+ [DSC_LOGIN_AUTH_PEND] = "LOGIN_AUTH_PEND",
};
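Using designated initializers keyed by the DSC_* enum means the string table can no longer fall out of step with the enum when entries are reordered or inserted; the compiler places each string at its matching index. A plain C sketch:

enum demo_state { DEMO_DELETED, DEMO_PENDING, DEMO_ONLINE };

static const char *const demo_state_str[] = {
	[DEMO_DELETED]	= "DELETED",
	[DEMO_PENDING]	= "PENDING",
	[DEMO_ONLINE]	= "ONLINE",
};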
/*
@@ -2886,7 +2910,11 @@ struct ct_fdmi2_hba_attributes {
#define FDMI_PORT_SPEED_8GB 0x10
#define FDMI_PORT_SPEED_16GB 0x20
#define FDMI_PORT_SPEED_32GB 0x40
-#define FDMI_PORT_SPEED_64GB 0x80
+#define FDMI_PORT_SPEED_20GB 0x80
+#define FDMI_PORT_SPEED_40GB 0x100
+#define FDMI_PORT_SPEED_128GB 0x200
+#define FDMI_PORT_SPEED_64GB 0x400
+#define FDMI_PORT_SPEED_256GB 0x800
#define FDMI_PORT_SPEED_UNKNOWN 0x8000
#define FC_CLASS_2 0x04
@@ -3192,6 +3220,8 @@ struct ct_sns_rsp {
#define GFF_NVME_OFFSET 23 /* type = 28h */
struct {
uint8_t fc4_features[128];
+#define FC4_FF_TARGET BIT_0
+#define FC4_FF_INITIATOR BIT_1
} gff_id;
struct {
uint8_t reserved;
@@ -3963,6 +3993,7 @@ struct qla_hw_data {
/* SRB cache. */
#define SRB_MIN_REQ 128
mempool_t *srb_mempool;
+ u8 port_name[WWN_SIZE];
volatile struct {
uint32_t mbox_int :1;
@@ -4028,6 +4059,9 @@ struct qla_hw_data {
uint32_t n2n_fw_acc_sec:1;
uint32_t plogi_template_valid:1;
uint32_t port_isolated:1;
+ uint32_t eeh_flush:2;
+#define EEH_FLUSH_RDY 1
+#define EEH_FLUSH_DONE 2
} flags;
uint16_t max_exchg;
@@ -4062,6 +4096,7 @@ struct qla_hw_data {
uint32_t rsp_que_len;
uint32_t req_que_off;
uint32_t rsp_que_off;
+ unsigned long eeh_jif;
/* Multi queue data structs */
device_reg_t *mqiobase;
@@ -4244,8 +4279,8 @@ struct qla_hw_data {
#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
-#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
- IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_BIDI_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
/* Bit 21 of fw_attributes decides the MCTP capabilities */
@@ -4262,8 +4297,10 @@ struct qla_hw_data {
#define QLA_ABTS_WAIT_ENABLED(_sp) \
(QLA_NVME_IOS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
-#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
-#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
+ IS_QLA28XX(ha))
#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
IS_QLA28XX(ha))
@@ -4610,6 +4647,7 @@ struct qla_hw_data {
struct workqueue_struct *wq;
struct work_struct heartbeat_work;
struct qlfc_fw fw_buf;
+ unsigned long last_heartbeat_run_jiffies;
/* FCP_CMND priority support */
struct qla_fcp_prio_cfg *fcp_prio_cfg;
@@ -4735,6 +4773,7 @@ struct active_regions {
uint8_t vpd_nvram;
uint8_t npiv_config_0_1;
uint8_t npiv_config_2_3;
+ uint8_t nvme_params;
} aux;
};
@@ -4906,7 +4945,6 @@ typedef struct scsi_qla_host {
/* list of commands waiting on workqueue */
struct list_head qla_cmd_list;
- struct list_head qla_sess_op_cmd_list;
struct list_head unknown_atio_list;
spinlock_t cmd_list_lock;
struct delayed_work unknown_atio_work;
@@ -4998,6 +5036,10 @@ typedef struct scsi_qla_host {
u64 short_link_down_cnt;
struct edif_dbell e_dbell;
struct pur_core pur_cinfo;
+
+#define DPORT_DIAG_IN_PROGRESS BIT_0
+#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1
+ uint16_t dport_status;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -5016,6 +5058,7 @@ struct qla27xx_image_status {
#define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1
#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2
#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3
+#define QLA28XX_AUX_IMG_NVME_PARAMS BIT_4
#define SET_VP_IDX 1
#define SET_AL_PA 2
@@ -5191,8 +5234,6 @@ struct secure_flash_update_block_pk {
#define QLA_DSDS_PER_IOCB 37
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
-
#define QLA_SG_ALL 1024
enum nexus_wait_type {
@@ -5410,7 +5451,7 @@ struct ql_vnd_stat_entry {
struct ql_vnd_stats {
u64 entry_count; /* Num of entries */
u64 rservd;
- struct ql_vnd_stat_entry entry[0]; /* Place holder of entries */
+ struct ql_vnd_stat_entry entry[]; /* Place holder of entries */
} __packed;
struct ql_vnd_host_stats_resp {
@@ -5427,4 +5468,14 @@ struct ql_vnd_tgt_stats_resp {
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
+
+#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \
+ _fcport->disc_state == DSC_DELETED)
+
+#define DBG_FCPORT_PRFMT(_fp, _fmt, _args...) \
+ "%s: %8phC: " _fmt " (state=%d disc_state=%d scan_state=%d loopid=0x%x deleted=%d flags=0x%x)\n", \
+ __func__, _fp->port_name, ##_args, atomic_read(&_fp->state), \
+ _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
+ _fp->flags
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 85bd0e468d43..777808af5634 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -489,6 +489,99 @@ qla_dfs_naqp_show(struct seq_file *s, void *unused)
return 0;
}
+/*
+ * Helper macros for setting up debugfs entries.
+ * _name: The name of the debugfs entry
+ * _ctx_struct: The context that was passed when creating the debugfs file
+ *
+ * QLA_DFS_SETUP_RD can be used when there is only a show function.
+ * - the show function takes the name qla_dfs_<debugfs-name>_show
+ *
+ * QLA_DFS_SETUP_RW can be used when there are both show and write functions.
+ * - the show function takes the name qla_dfs_<debugfs-name>_show
+ * - the write function takes the name qla_dfs_<debugfs-name>_write
+ *
+ * To add a new debugfs entry:
+ * 1. Create a "struct dentry *" in the appropriate structure in the format
+ * dfs_<debugfs-name>
+ * 2. Set up the debugfs entry using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW
+ * 3. Create the debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE
+ * or QLA_DFS_ROOT_CREATE_FILE
+ * 4. Remove the debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE
+ * or QLA_DFS_ROOT_REMOVE_FILE
+ *
+ * Example of creating a "TEST" debugfs file:
+ * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
+ * 2. QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);
+ * 3. In qla2x00_dfs_setup():
+ * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
+ * 4. In qla2x00_dfs_remove():
+ * QLA_DFS_REMOVE_FILE(ha, TEST);
+ */
+#define QLA_DFS_SETUP_RD(_name, _ctx_struct) \
+static int \
+qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
+{ \
+ _ctx_struct *__ctx = inode->i_private; \
+ \
+ return single_open(file, qla_dfs_##_name##_show, __ctx); \
+} \
+ \
+static const struct file_operations qla_dfs_##_name##_ops = { \
+ .open = qla_dfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
+#define QLA_DFS_SETUP_RW(_name, _ctx_struct) \
+static int \
+qla_dfs_##_name##_open(struct inode *inode, struct file *file) \
+{ \
+ _ctx_struct *__ctx = inode->i_private; \
+ \
+ return single_open(file, qla_dfs_##_name##_show, __ctx); \
+} \
+ \
+static const struct file_operations qla_dfs_##_name##_ops = { \
+ .open = qla_dfs_##_name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .write = qla_dfs_##_name##_write, \
+};
+
+#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \
+ do { \
+ if (!qla_dfs_##_name) \
+ qla_dfs_##_name = debugfs_create_file(#_name, \
+ _perm, qla2x00_dfs_root, _ctx, \
+ &qla_dfs_##_name##_ops); \
+ } while (0)
+
+#define QLA_DFS_ROOT_REMOVE_FILE(_name) \
+ do { \
+ if (qla_dfs_##_name) { \
+ debugfs_remove(qla_dfs_##_name); \
+ qla_dfs_##_name = NULL; \
+ } \
+ } while (0)
+
+#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \
+ do { \
+ (_struct)->dfs_##_name = debugfs_create_file(#_name, \
+ _perm, _parent, _ctx, \
+ &qla_dfs_##_name##_ops); \
+ } while (0)
+
+#define QLA_DFS_REMOVE_FILE(_struct, _name) \
+ do { \
+ if ((_struct)->dfs_##_name) { \
+ debugfs_remove((_struct)->dfs_##_name); \
+ (_struct)->dfs_##_name = NULL; \
+ } \
+ } while (0)
+
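Following the numbered recipe in the comment above, a minimal sketch of step 2 for a hypothetical "TEST" entry (show side only), relying on the QLA_DFS_SETUP_RD macro defined above:

#include <linux/seq_file.h>

static int qla_dfs_TEST_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "hello\n");
	return 0;
}

QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t);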
static int
qla_dfs_naqp_open(struct inode *inode, struct file *file)
{
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 53d2b8562027..00ccc41cef14 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -52,6 +52,31 @@ const char *sc_to_str(uint16_t cmd)
return "unknown";
}
+static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct edb_node *edbnode = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+
+ /* db nodes are fifo - no qualifications done */
+ if (!list_empty(&vha->e_dbell.head)) {
+ edbnode = list_first_entry(&vha->e_dbell.head,
+ struct edb_node, list);
+ list_del_init(&edbnode->list);
+ }
+
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ return edbnode;
+}
+
+static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
+{
+ list_del_init(&node->list);
+ kfree(node);
+}
+
static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
uint16_t handle)
{
@@ -257,14 +282,8 @@ qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
f = NULL;
list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
- if ((f->flags & FCF_FCSP_DEVICE)) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058,
- "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n",
- f->node_name, f->port_name,
- f->d_id.b24, id->b24);
- if (f->d_id.b24 == id->b24)
- return f;
- }
+ if (f->d_id.b24 == id->b24)
+ return f;
}
return NULL;
}
@@ -280,14 +299,19 @@ qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
{
/* check that the app is allow/known to the driver */
- if (appid.app_vid == EDIF_APP_ID) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__);
- return true;
+ if (appid.app_vid != EDIF_APP_ID) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
+ __func__, appid.app_vid);
+ return false;
}
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
- __func__, appid.app_vid);
- return false;
+ if (appid.version != EDIF_VERSION1) {
+ ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)",
+ __func__, appid.version);
+ return false;
+ }
+
+ return true;
}
static void
@@ -486,16 +510,35 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
/* mark doorbell as active since an app is now present */
vha->e_dbell.db_flags |= EDB_ACTIVE;
} else {
- ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n",
- __func__);
+ goto out;
}
if (N2N_TOPO(vha->hw)) {
- if (vha->hw->flags.n2n_fw_acc_sec)
- set_bit(N2N_LINK_RESET, &vha->dpc_flags);
- else
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ fcport->n2n_link_reset_cnt = 0;
+
+ if (vha->hw->flags.n2n_fw_acc_sec) {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ qla_edif_sa_ctl_init(vha, fcport);
+
+ /*
+ * While authentication app was not running, remote device
+ * could still try to login with this local port. Let's
+ * clear the state and try again.
+ */
+ qla2x00_wait_for_sess_deletion(vha);
+
+ /* bounce the link to get the other guy to relogin */
+ if (!vha->hw->flags.n2n_bigger) {
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else {
+ qla2x00_wait_for_hba_online(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+ }
} else {
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_edif, vha, 0x2058,
@@ -517,19 +560,31 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_DOWN)
break;
- fcport->edif.app_started = 1;
fcport->login_retry = vha->hw->login_retry_count;
- /* no activity */
fcport->edif.app_stop = 0;
+ fcport->edif.app_sess_online = 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport, true);
+
+ if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET)))
+ continue;
+
+ rval = 0;
ql_dbg(ql_dbg_edif, vha, 0x911e,
"%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
__func__, fcport->port_name);
- fcport->edif.app_sess_online = 0;
qlt_schedule_sess_for_deletion(fcport);
qla_edif_sa_ctl_init(vha, fcport);
}
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
@@ -540,9 +595,11 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
__func__);
}
+out:
appreply.host_support_edif = vha->hw->flags.edif_enabled;
appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
appreply.edif_edb_active = vha->e_dbell.db_flags;
+ appreply.version = EDIF_VERSION1;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
@@ -610,9 +667,6 @@ qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fcport->send_els_logo = 1;
qlt_schedule_sess_for_deletion(fcport);
-
- /* qla_edif_flush_sa_ctl_lists(fcport); */
- fcport->edif.app_started = 0;
}
}
@@ -657,7 +711,6 @@ qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
static int
qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
{
- int32_t rval = 0;
struct auth_complete_cmd appplogiok;
struct app_plogi_reply appplogireply = {0};
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
@@ -668,6 +721,12 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, &appplogiok,
sizeof(struct auth_complete_cmd));
+ /* silence unaligned access warning */
+ portid.b.domain = appplogiok.u.d_id.b.domain;
+ portid.b.area = appplogiok.u.d_id.b.area;
+ portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
+
+ appplogireply.version = EDIF_VERSION1;
switch (appplogiok.type) {
case PL_TYPE_WWPN:
fcport = qla2x00_find_fcport_by_wwpn(vha,
@@ -678,7 +737,7 @@ qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
__func__, appplogiok.u.wwpn);
break;
case PL_TYPE_DID:
- fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id);
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
if (!fcport)
ql_dbg(ql_dbg_edif, vha, 0x911d,
"%s d_id lookup failed: %x\n", __func__,
@@ -753,7 +812,7 @@ errstate_exit:
&appplogireply,
sizeof(struct app_plogi_reply));
- return rval;
+ return 0;
}
/**
@@ -777,6 +836,11 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, &appplogifail,
sizeof(struct auth_complete_cmd));
+ /* silence unaligned access warning */
+ portid.b.domain = appplogifail.u.d_id.b.domain;
+ portid.b.area = appplogifail.u.d_id.b.area;
+ portid.b.al_pa = appplogifail.u.d_id.b.al_pa;
+
/*
* TODO: edif: app has failed this plogi. Inform driver to
* take any action (if any).
@@ -788,7 +852,7 @@ qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
SET_DID_STATUS(bsg_reply->result, DID_OK);
break;
case PL_TYPE_DID:
- fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id);
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
if (!fcport)
ql_dbg(ql_dbg_edif, vha, 0x911d,
"%s d_id lookup failed: %x\n", __func__,
@@ -855,6 +919,8 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
} else {
struct fc_port *fcport = NULL, *tf;
+ app_reply->version = EDIF_VERSION1;
+
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (!(fcport->flags & FCF_FCSP_DEVICE))
continue;
@@ -871,9 +937,25 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
continue;
- app_reply->ports[pcnt].rekey_count =
- fcport->edif.rekey_cnt;
+ if (!N2N_TOPO(vha->hw)) {
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN &&
+ !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport,
+ true);
+ if (!rval &&
+ !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type &
+ (FCT_TARGET | FCT_NVME_TARGET)))
+ continue;
+ }
+
+ rval = 0;
+
+ app_reply->ports[pcnt].version = EDIF_VERSION1;
app_reply->ports[pcnt].remote_type =
VND_CMD_RTYPE_UNKNOWN;
if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
@@ -970,6 +1052,8 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
} else {
struct fc_port *fcport = NULL, *tf;
+ app_reply->version = EDIF_VERSION1;
+
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
if (pcnt > app_req.num_ports)
@@ -1003,6 +1087,164 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
return rval;
}
+static int32_t
+qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_port *fcport;
+ struct aen_complete_cmd ack;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));
+
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: %06x event_code %x\n",
+ __func__, ack.port_id.b24, ack.event_code);
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ if (!fcport) {
+ ql_dbg(ql_dbg_edif, vha, 0x70cf,
+ "%s: unable to find fcport %06x \n",
+ __func__, ack.port_id.b24);
+ return 0;
+ }
+
+ switch (ack.event_code) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ fcport->edif.sess_down_acked = 1;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ u32 sg_skip, reply_payload_len;
+ bool keep;
+ struct edb_node *dbnode = NULL;
+ struct edif_app_dbell ap;
+ int dat_size = 0;
+
+ sg_skip = 0;
+ reply_payload_len = bsg_job->reply_payload.payload_len;
+
+ while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) {
+ dbnode = qla_edb_getnext(vha);
+ if (dbnode) {
+ keep = true;
+ dat_size = 0;
+ ap.event_code = dbnode->ntype;
+ switch (dbnode->ntype) {
+ case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+ case VND_CMD_AUTH_STATE_NEEDED:
+ ap.port_id = dbnode->u.plogi_did;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_ELS_RCVD:
+ ap.port_id = dbnode->u.els_sid;
+ dat_size += sizeof(ap.port_id);
+ break;
+ case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
+ ap.port_id = dbnode->u.sa_aen.port_id;
+ memcpy(&ap.event_data, &dbnode->u,
+ sizeof(struct edif_sa_update_aen));
+ dat_size += sizeof(struct edif_sa_update_aen);
+ break;
+ default:
+ keep = false;
+ ql_log(ql_log_warn, vha, 0x09102,
+ "%s unknown DB type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+ break;
+ }
+ ap.event_data_size = dat_size;
+ /* 8 = sizeof(ap.event_code) + sizeof(ap.event_data_size) */
+ dat_size += 8;
+ if (keep)
+ sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &ap, dat_size, sg_skip, false);
+
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s Doorbell consumed : type=%d %p\n",
+ __func__, dbnode->ntype, dbnode);
+
+ kfree(dbnode);
+ } else {
+ break;
+ }
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ bsg_reply->reply_payload_rcv_len = sg_skip;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ return 0;
+}
+
+static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
+ u32 delay)
+{
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+
+ /* small sleep for doorbell events to accumulate */
+ if (delay)
+ msleep(delay);
+
+ qla_edif_consume_dbell(vha, bsg_job);
+
+ bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+}
+
+static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha)
+{
+ unsigned long flags;
+ struct bsg_job *prev_bsg_job = NULL;
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (vha->e_dbell.dbell_bsg_job) {
+ prev_bsg_job = vha->e_dbell.dbell_bsg_job;
+ vha->e_dbell.dbell_bsg_job = NULL;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (prev_bsg_job)
+ __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0);
+}
+
+static int
+qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+{
+ unsigned long flags;
+ bool return_bsg = false;
+
+ /* flush previous dbell bsg */
+ qla_edif_dbell_bsg_done(vha);
+
+ spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
+ if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) {
+ /*
+ * when the next db event happens, bsg_job will return.
+ * Otherwise, timer will return it.
+ */
+ vha->e_dbell.dbell_bsg_job = bsg_job;
+ vha->e_dbell.bsg_expire = jiffies + 10 * HZ;
+ } else {
+ return_bsg = true;
+ }
+ spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
+
+ if (return_bsg)
+ __qla_edif_dbell_bsg_done(vha, bsg_job, 1);
+
+ return 0;
+}
+
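The sysfs doorbell read removed later in this file is replaced by this bsg-based long poll: an empty queue parks the job in e_dbell.dbell_bsg_job with a 10-second expiry, a produced event completes it from the doorbell producer, and qla_edif_timer() completes it on timeout. Sketched as a timeline in comments:

/*
 * Long-poll lifecycle (sketch of the code above):
 *
 *   app: QL_VND_SC_READ_DBELL ----> qla_edif_dbell_bsg()
 *     events queued  -> complete now (after a 1 ms accumulate delay)
 *     queue empty    -> park job; bsg_expire = jiffies + 10 * HZ
 *   event produced   -> qla_edif_dbell_bsg_done() returns the parked job
 *   qla_edif_timer() -> past bsg_expire? return it with an empty payload
 */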
int32_t
qla_edif_app_mgmt(struct bsg_job *bsg_job)
{
@@ -1014,8 +1256,13 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
bool done = true;
int32_t rval = 0;
uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
+ u32 level = ql_dbg_edif;
+
+ /* doorbell is high traffic */
+ if (vnd_sc == QL_VND_SC_READ_DBELL)
+ level = 0;
- ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n",
+ ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n",
__func__, vnd_sc);
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
@@ -1024,7 +1271,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
if (!vha->hw->flags.edif_enabled ||
test_bit(VPORT_DELETE, &vha->dpc_flags)) {
- ql_dbg(ql_dbg_edif, vha, 0x911d,
+ ql_dbg(level, vha, 0x911d,
"%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
__func__, bsg_job, vha->dpc_flags);
@@ -1033,7 +1280,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
}
if (!qla_edif_app_check(vha, appcheck)) {
- ql_dbg(ql_dbg_edif, vha, 0x911d,
+ ql_dbg(level, vha, 0x911d,
"%s app checked failed.\n",
__func__);
@@ -1065,6 +1312,13 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
case QL_VND_SC_GET_STATS:
rval = qla_edif_app_getstats(vha, bsg_job);
break;
+ case QL_VND_SC_AEN_COMPLETE:
+ rval = qla_edif_ack(vha, bsg_job);
+ break;
+ case QL_VND_SC_READ_DBELL:
+ rval = qla_edif_dbell_bsg(vha, bsg_job);
+ done = false;
+ break;
default:
ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
__func__,
@@ -1076,7 +1330,7 @@ qla_edif_app_mgmt(struct bsg_job *bsg_job)
done:
if (done) {
- ql_dbg(ql_dbg_user, vha, 0x7009,
+ ql_dbg(level, vha, 0x7009,
"%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job);
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
@@ -1238,6 +1492,8 @@ qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0
#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2
+#define EDIF_MSLEEP_INTERVAL 100
+#define EDIF_RETRY_COUNT 50
int
qla24xx_sadb_update(struct bsg_job *bsg_job)
@@ -1250,9 +1506,10 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
struct edif_list_entry *edif_entry = NULL;
int found = 0;
int rval = 0;
- int result = 0;
+ int result = 0, cnt;
struct qla_sa_update_frame sa_frame;
struct srb_iocb *iocb_cmd;
+ port_id_t portid;
ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
"%s entered, vha: 0x%p\n", __func__, vha);
@@ -1276,7 +1533,12 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
goto done;
}
- fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id);
+ /* silence unaligned access warning */
+ portid.b.domain = sa_frame.port_id.b.domain;
+ portid.b.area = sa_frame.port_id.b.area;
+ portid.b.al_pa = sa_frame.port_id.b.al_pa;
+
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
if (fcport) {
found = 1;
if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
@@ -1289,7 +1551,7 @@ qla24xx_sadb_update(struct bsg_job *bsg_job)
ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
sa_frame.port_id.b24);
rval = -EINVAL;
- SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
+ SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
goto done;
}
@@ -1485,11 +1747,23 @@ force_rx_delete:
sp->done = qla2x00_bsg_job_done;
iocb_cmd = &sp->u.iocb_cmd;
iocb_cmd->u.sa_update.sa_frame = sa_frame;
-
+ cnt = 0;
+retry:
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS) {
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
ql_log(ql_dbg_edif, vha, 0x70e3,
- "qla2x00_start_sp failed=%d.\n", rval);
+ "%s qla2x00_start_sp failed=%d.\n",
+ __func__, rval);
qla2x00_rel_sp(sp);
rval = -EIO;
@@ -1782,30 +2056,6 @@ qla_edb_init(scsi_qla_host_t *vha)
/* initialize lock which protects doorbell & init list */
spin_lock_init(&vha->e_dbell.db_lock);
INIT_LIST_HEAD(&vha->e_dbell.head);
-
- /* create and initialize doorbell */
- init_completion(&vha->e_dbell.dbell);
-}
-
-static void
-qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
-{
- /*
- * releases the space held by this edb node entry
- * this function does _not_ free the edb node itself
- * NB: the edb node entry passed should not be on any list
- *
- * currently for doorbell there's no additional cleanup
- * needed, but here as a placeholder for furture use.
- */
-
- if (!node) {
- ql_dbg(ql_dbg_edif, vha, 0x09122,
- "%s error - no valid node passed\n", __func__);
- return;
- }
-
- node->ntype = N_UNDEF;
}
static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
@@ -1852,11 +2102,8 @@ static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- list_for_each_entry_safe(e, tmp, &edb_list, list) {
+ list_for_each_entry_safe(e, tmp, &edb_list, list)
qla_edb_node_free(vha, e);
- list_del_init(&e->list);
- kfree(e);
- }
}
/* function called when app is stopping */
@@ -1884,14 +2131,10 @@ qla_edb_stop(scsi_qla_host_t *vha)
"%s freeing edb_node type=%x\n",
__func__, node->ntype);
qla_edb_node_free(vha, node);
- list_del(&node->list);
-
- kfree(node);
}
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- /* wake up doorbell waiters - they'll be dismissed with error code */
- complete_all(&vha->e_dbell.dbell);
+ qla_edif_dbell_bsg_done(vha);
}
static struct edb_node *
@@ -1929,9 +2172,6 @@ qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
list_add_tail(&ptr->list, &vha->e_dbell.head);
spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
- /* ring doorbell for waiters */
- complete(&vha->e_dbell.dbell);
-
return true;
}
@@ -1995,47 +2235,29 @@ qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
edbnode->u.sa_aen.port_id = fcport->d_id;
edbnode->u.sa_aen.status = data;
edbnode->u.sa_aen.key_type = data2;
+ edbnode->u.sa_aen.version = EDIF_VERSION1;
break;
default:
ql_dbg(ql_dbg_edif, vha, 0x09102,
"%s unknown type: %x\n", __func__, dbtype);
- qla_edb_node_free(vha, edbnode);
kfree(edbnode);
edbnode = NULL;
break;
}
- if (edbnode && (!qla_edb_node_add(vha, edbnode))) {
+ if (edbnode) {
+ if (!qla_edb_node_add(vha, edbnode)) {
+ ql_dbg(ql_dbg_edif, vha, 0x09102,
+ "%s unable to add dbnode\n", __func__);
+ kfree(edbnode);
+ return;
+ }
ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s unable to add dbnode\n", __func__);
- qla_edb_node_free(vha, edbnode);
- kfree(edbnode);
- return;
- }
- if (edbnode && fcport)
- fcport->edif.auth_state = dbtype;
- ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
-}
-
-static struct edb_node *
-qla_edb_getnext(scsi_qla_host_t *vha)
-{
- unsigned long flags;
- struct edb_node *edbnode = NULL;
-
- spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
-
- /* db nodes are fifo - no qualifications done */
- if (!list_empty(&vha->e_dbell.head)) {
- edbnode = list_first_entry(&vha->e_dbell.head,
- struct edb_node, list);
- list_del(&edbnode->list);
+ "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
+ qla_edif_dbell_bsg_done(vha);
+ if (fcport)
+ fcport->edif.auth_state = dbtype;
}
-
- spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
-
- return edbnode;
}
void
@@ -2063,90 +2285,16 @@ qla_edif_timer(scsi_qla_host_t *vha)
ha->edif_post_stop_cnt_down = 60;
}
}
-}
-
-/*
- * app uses separate thread to read this. It'll wait until the doorbell
- * is rung by the driver or the max wait time has expired
- */
-ssize_t
-edif_doorbell_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- struct edb_node *dbnode = NULL;
- struct edif_app_dbell *ap = (struct edif_app_dbell *)buf;
- uint32_t dat_siz, buf_size, sz;
-
- /* TODO: app currently hardcoded to 256. Will transition to bsg */
- sz = 256;
-
- /* stop new threads from waiting if we're not init'd */
- if (DBELL_INACTIVE(vha)) {
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
- "%s error - edif db not enabled\n", __func__);
- return 0;
- }
-
- if (!vha->hw->flags.edif_enabled) {
- /* edif not enabled */
- ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122,
- "%s error - edif not enabled\n", __func__);
- return -1;
- }
-
- buf_size = 0;
- while ((sz - buf_size) >= sizeof(struct edb_node)) {
- /* remove the next item from the doorbell list */
- dat_siz = 0;
- dbnode = qla_edb_getnext(vha);
- if (dbnode) {
- ap->event_code = dbnode->ntype;
- switch (dbnode->ntype) {
- case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
- case VND_CMD_AUTH_STATE_NEEDED:
- ap->port_id = dbnode->u.plogi_did;
- dat_siz += sizeof(ap->port_id);
- break;
- case VND_CMD_AUTH_STATE_ELS_RCVD:
- ap->port_id = dbnode->u.els_sid;
- dat_siz += sizeof(ap->port_id);
- break;
- case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
- ap->port_id = dbnode->u.sa_aen.port_id;
- memcpy(ap->event_data, &dbnode->u,
- sizeof(struct edif_sa_update_aen));
- dat_siz += sizeof(struct edif_sa_update_aen);
- break;
- default:
- /* unknown node type, rtn unknown ntype */
- ap->event_code = VND_CMD_AUTH_STATE_UNDEF;
- memcpy(ap->event_data, &dbnode->ntype, 4);
- dat_siz += 4;
- break;
- }
- ql_dbg(ql_dbg_edif, vha, 0x09102,
- "%s Doorbell consumed : type=%d %p\n",
- __func__, dbnode->ntype, dbnode);
- /* we're done with the db node, so free it up */
- qla_edb_node_free(vha, dbnode);
- kfree(dbnode);
- } else {
- break;
- }
-
- ap->event_data_size = dat_siz;
- /* 8bytes = ap->event_code + ap->event_data_size */
- buf_size += dat_siz + 8;
- ap = (struct edif_app_dbell *)(buf + buf_size);
- }
- return buf_size;
+ if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire))
+ qla_edif_dbell_bsg_done(vha);
}
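
The timer path above reaps a parked doorbell read once bsg_expire passes. The producer side, which parks the job and arms the deadline, sits in a hunk not shown here; it plausibly looks like the sketch below, with the window length an assumption:

	spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
	vha->e_dbell.dbell_bsg_job = bsg_job;
	vha->e_dbell.bsg_expire = jiffies + 10 * HZ;	/* assumed window */
	spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);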
static void qla_noop_sp_done(srb_t *sp, int res)
{
- sp->free(sp);
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
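
This is the first of many conversions in this patch from sp->free(sp) to an explicit kref drop. Every SRB now carries its lifetime in cmd_kref: allocation takes the reference the comments label "ref: INIT", and each terminal path must drop exactly one. A standalone sketch of the shape, with simplified types (qla2x00_sp_release itself is only declared, in the qla_gbl.h hunk below):

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct srb_like {
		struct kref cmd_kref;
		/* ... command state ... */
	};

	static void srb_like_release(struct kref *kref)
	{
		struct srb_like *sp = container_of(kref, struct srb_like, cmd_kref);

		kfree(sp);	/* the real driver returns the SRB to its pool */
	}

	/* allocation: kref_init(&sp->cmd_kref) takes the INIT reference;
	 * completion/error paths: kref_put(&sp->cmd_kref, srb_like_release); */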
/*
@@ -2169,7 +2317,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
if (!sa_ctl) {
ql_dbg(ql_dbg_edif, vha, 0x70e6,
"sa_ctl allocation failed\n");
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto done;
}
fcport = sa_ctl->fcport;
@@ -2179,7 +2328,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
if (!sp) {
ql_dbg(ql_dbg_edif, vha, 0x70e6,
"SRB allocation failed\n");
- return -ENOMEM;
+ rval = -ENOMEM;
+ goto done;
}
fcport->flags |= FCF_ASYNC_SENT;
@@ -2208,10 +2358,17 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- rval = QLA_FUNCTION_FAILED;
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
+done_free_sp:
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
}
void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
@@ -2430,8 +2587,7 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
- if (DBELL_INACTIVE(vha) ||
- (fcport && EDIF_SESSION_DOWN(fcport))) {
+ if (DBELL_INACTIVE(vha)) {
ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
__func__, host->e_dbell.db_flags,
fcport ? fcport->d_id.b24 : 0);
@@ -2441,6 +2597,22 @@ void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
return;
}
+ if (fcport && EDIF_SESSION_DOWN(fcport)) {
+ ql_dbg(ql_dbg_edif, host, 0x13b6,
+ "%s terminate exchange. Send logo to 0x%x\n",
+ __func__, a.did.b24);
+
+ a.tx_byte_count = a.tx_len = 0;
+ a.tx_addr = 0;
+ a.control_flags = EPD_RX_XCHG; /* EPD_RX_XCHG = terminate cmd */
+ qla_els_reject_iocb(host, (*rsp)->qpair, &a);
+ qla_enode_free(host, ptr);
+ /* send logo to let the remote port know to tear down the session */
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+
/* add the local enode to the list */
qla_enode_add(host, ptr);
@@ -2816,6 +2988,12 @@ qla28xx_start_scsi_edif(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword(req->req_q_out);
@@ -3007,6 +3185,7 @@ queuing_error:
mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
sp->u.scmd.ct6_ctx = NULL;
}
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(lock, flags);
return QLA_FUNCTION_FAILED;
@@ -3333,10 +3512,14 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fc_port_t *fcport = NULL;
struct qla_hw_data *ha = vha->hw;
srb_t *sp;
- int rval = (DID_ERROR << 16);
+ int rval = (DID_ERROR << 16), cnt;
port_id_t d_id;
struct qla_bsg_auth_els_request *p =
(struct qla_bsg_auth_els_request *)bsg_job->request;
+ struct qla_bsg_auth_els_reply *rpl =
+ (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+ rpl->version = EDIF_VERSION1;
d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
@@ -3355,7 +3538,7 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (qla_bsg_check(vha, bsg_job, fcport))
return 0;
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ if (EDIF_SESS_DELETE(fcport)) {
ql_dbg(ql_dbg_edif, vha, 0x910d,
"%s ELS code %x, no loop id.\n", __func__,
bsg_request->rqst_data.r_els.els_code);
@@ -3424,17 +3607,26 @@ int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
sp->free = qla2x00_bsg_sp_free;
sp->done = qla2x00_bsg_job_done;
+ cnt = 0;
+retry:
rval = qla2x00_start_sp(sp);
-
- ql_dbg(ql_dbg_edif, vha, 0x700a,
- "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
- __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
- p->e.extra_rx_xchg_address, p->e.extra_control_flags,
- sp->handle, sp->remap.req.len, bsg_job);
-
- if (rval != QLA_SUCCESS) {
+ switch (rval) {
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_edif, vha, 0x700a,
+ "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
+ __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+ p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+ sp->handle, sp->remap.req.len, bsg_job);
+ break;
+ case EAGAIN:
+ msleep(EDIF_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < EDIF_RETRY_COUNT)
+ goto retry;
+ fallthrough;
+ default:
ql_log(ql_log_warn, vha, 0x700e,
- "qla2x00_start_sp failed = %d\n", rval);
+ "%s qla2x00_start_sp failed = %d\n", __func__, rval);
SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
rval = -EIO;
goto done_free_remap_rsp;
@@ -3456,14 +3648,29 @@ done:
void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
{
+ u16 cnt = 0;
+
if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
ql_dbg(ql_dbg_disc, vha, 0xf09c,
"%s: sess %8phN send port_offline event\n",
__func__, sess->port_name);
sess->edif.app_sess_online = 0;
+ sess->edif.sess_down_acked = 0;
qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
sess->d_id.b24, 0, sess);
qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+
+ while (!READ_ONCE(sess->edif.sess_down_acked) &&
+ !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+ msleep(100);
+ cnt++;
+ if (cnt > 100)
+ break;
+ }
+ sess->edif.sess_down_acked = 0;
+ ql_dbg(ql_dbg_disc, vha, 0xf09c,
+ "%s: sess %8phN port_offline event completed\n",
+ __func__, sess->port_name);
}
}
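
The wait loop just added blocks session teardown until the app acknowledges the SESSION_SHUTDOWN doorbell or the vport goes away, bounded at roughly 100 x 100 ms = 10 s. The writer that sets the flag is not in these hunks; plausibly the new QL_VND_SC_AEN_COMPLETE bsg handler performs a store that pairs with the READ_ONCE() above, along these lines (hypothetical sketch, not from this patch):

	static void edif_aen_complete_sketch(fc_port_t *fcport,
					     struct aen_complete_cmd *aen)
	{
		if (aen->event_code == VND_CMD_AUTH_STATE_SESSION_SHUTDOWN)
			WRITE_ONCE(fcport->edif.sess_down_acked, 1);
	}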
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
index a965ca8e47ce..7cdb89ccdc6e 100644
--- a/drivers/scsi/qla2xxx/qla_edif.h
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -51,7 +51,8 @@ struct edif_dbell {
enum db_flags_t db_flags;
spinlock_t db_lock;
struct list_head head;
- struct completion dbell;
+ struct bsg_job *dbell_bsg_job;
+ unsigned long bsg_expire;
};
#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */
@@ -140,4 +141,8 @@ struct enode {
(DBELL_ACTIVE(_fcport->vha) && \
(_fcport->disc_state == DSC_LOGIN_AUTH_PEND))
+#define EDIF_SESS_DELETE(_s) \
+ (qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+ _s->disc_state == DSC_DELETED))
+
#endif /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 53026d82ebff..0931f4e4e127 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -7,13 +7,15 @@
#ifndef __QLA_EDIF_BSG_H
#define __QLA_EDIF_BSG_H
+#define EDIF_VERSION1 1
+
/* BSG Vendor specific commands */
#define ELS_MAX_PAYLOAD 2112
#ifndef WWN_SIZE
#define WWN_SIZE 8
#endif
-#define VND_CMD_APP_RESERVED_SIZE 32
-
+#define VND_CMD_APP_RESERVED_SIZE 28
+#define VND_CMD_PAD_SIZE 3
enum auth_els_sub_cmd {
SEND_ELS = 0,
SEND_ELS_REPLY,
@@ -28,7 +30,9 @@ struct extra_auth_els {
#define BSG_CTL_FLAG_LS_ACC 1
#define BSG_CTL_FLAG_LS_RJT 2
#define BSG_CTL_FLAG_TRM 3
- uint8_t extra_rsvd[3];
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct qla_bsg_auth_els_request {
@@ -39,51 +43,46 @@ struct qla_bsg_auth_els_request {
struct qla_bsg_auth_els_reply {
struct fc_bsg_reply r;
uint32_t rx_xchg_address;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
};
struct app_id {
int app_vid;
- uint8_t app_key[32];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
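
All app-facing structures now end in the same version/pad/reserved tail, so sizes stay fixed while a version byte is threaded through the ABI. A compile-time guard of this sort (not part of the patch) would pin the layout, assuming the values defined above:

	#include <linux/build_bug.h>

	static_assert(sizeof(struct app_id) ==
		      sizeof(int) + 1 + VND_CMD_PAD_SIZE + VND_CMD_APP_RESERVED_SIZE,
		      "EDIF app_id ABI size changed");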
struct app_start_reply {
uint32_t host_support_edif;
uint32_t edif_enode_active;
uint32_t edif_edb_active;
- uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_start {
struct app_id app_info;
- uint32_t prli_to;
- uint32_t key_shred;
uint8_t app_start_flags;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE - 1];
+ uint8_t version;
+ uint8_t pad[2];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_stop {
struct app_id app_info;
- char buf[16];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_plogi_reply {
uint32_t prli_status;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
-} __packed;
-
-#define RECFG_TIME 1
-#define RECFG_BYTES 2
-
-struct app_rekey_cfg {
- struct app_id app_info;
- uint8_t rekey_mode;
- port_id_t d_id;
- uint8_t force;
- union {
- int64_t bytes;
- int64_t time;
- } rky_units;
-
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -91,7 +90,9 @@ struct app_pinfo_req {
struct app_id app_info;
uint8_t num_ports;
port_id_t remote_pid;
- uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
struct app_pinfo {
@@ -103,11 +104,8 @@ struct app_pinfo {
#define VND_CMD_RTYPE_INITIATOR 2
uint8_t remote_state;
uint8_t auth_state;
- uint8_t rekey_mode;
- int64_t rekey_count;
- int64_t rekey_config_value;
- int64_t rekey_consumed_value;
-
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -120,13 +118,17 @@ struct app_pinfo {
struct app_pinfo_reply {
uint8_t port_count;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
- struct app_pinfo ports[0];
+ struct app_pinfo ports[];
} __packed;
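
Replacing the GNU zero-length arrays with C99 flexible array members lets the compiler and fortify checks see the true bounds; allocation sizing is unchanged. A hypothetical helper showing the idiomatic struct_size() pairing (no such helper is in this patch):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct app_pinfo_reply *alloc_pinfo_reply(u8 port_count)
	{
		struct app_pinfo_reply *rpl;

		/* struct_size() adds the flexible ports[] tail with overflow checks */
		rpl = kzalloc(struct_size(rpl, ports, port_count), GFP_KERNEL);
		if (rpl)
			rpl->port_count = port_count;
		return rpl;
	}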
struct app_sinfo_req {
struct app_id app_info;
uint8_t num_ports;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
@@ -140,7 +142,10 @@ struct app_sinfo {
struct app_stats_reply {
uint8_t elem_count;
- struct app_sinfo elem[0];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ struct app_sinfo elem[];
} __packed;
struct qla_sa_update_frame {
@@ -163,9 +168,11 @@ struct qla_sa_update_frame {
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
port_id_t port_id;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved2[VND_CMD_APP_RESERVED_SIZE];
} __packed;
-// used for edif mgmt bsg interface
#define QL_VND_SC_UNDEF 0
#define QL_VND_SC_SA_UPDATE 1
#define QL_VND_SC_APP_START 2
@@ -175,6 +182,22 @@ struct qla_sa_update_frame {
#define QL_VND_SC_REKEY_CONFIG 6
#define QL_VND_SC_GET_FCINFO 7
#define QL_VND_SC_GET_STATS 8
+#define QL_VND_SC_AEN_COMPLETE 9
+#define QL_VND_SC_READ_DBELL 10
+
+/*
+ * The bsg caller provides an empty buffer to receive doorbell events.
+ *
+ * sg_io_v4.din_xferp = empty buffer for doorbell events
+ * sg_io_v4.dout_xferp = struct edif_read_dbell *buf
+ */
+struct edif_read_dbell {
+ struct app_id app_info;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+};
+
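A hedged user-space sketch of the new read-doorbell flow. The vendor subcommand placement (vendor_cmd[0]), the fc_bsg_request plumbing, and the /dev/bsg/fc_host<N> node name follow the usual FC bsg conventions; treat those details as assumptions rather than part of this patch:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <scsi/sg.h>		/* SG_IO */
	#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */
	#include <linux/bsg.h>		/* struct sg_io_v4 */

	static int edif_read_dbell_events(int bsg_fd, struct edif_read_dbell *cmd,
					  void *events, uint32_t events_len)
	{
		struct fc_bsg_request req;
		struct fc_bsg_reply rpl;
		struct sg_io_v4 io;

		memset(&req, 0, sizeof(req));
		req.msgcode = FC_BSG_HST_VENDOR;
		req.rqst_data.h_vendor.vendor_cmd[0] = QL_VND_SC_READ_DBELL;

		memset(&io, 0, sizeof(io));
		io.guard = 'Q';
		io.protocol = BSG_PROTOCOL_SCSI;
		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		io.request = (uintptr_t)&req;
		io.request_len = sizeof(req);
		io.response = (uintptr_t)&rpl;
		io.max_response_len = sizeof(rpl);
		io.dout_xferp = (uintptr_t)cmd;		/* struct edif_read_dbell */
		io.dout_xfer_len = sizeof(*cmd);
		io.din_xferp = (uintptr_t)events;	/* filled with doorbell events */
		io.din_xfer_len = events_len;

		return ioctl(bsg_fd, SG_IO, &io);	/* fd on the fc_host bsg node */
	}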
/* Application interface data structure for rtn data */
#define EXT_DEF_EVENT_DATA_SIZE 64
@@ -191,7 +214,9 @@ struct edif_sa_update_aen {
port_id_t port_id;
uint32_t key_type; /* Tx (1) or RX (2) */
uint32_t status; /* 0 success, 1 failed, 2 timeout, 3 error */
- uint8_t reserved[16];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
#define QL_VND_SA_STAT_SUCCESS 0
@@ -212,9 +237,22 @@ struct auth_complete_cmd {
uint8_t wwpn[WWN_SIZE];
port_id_t d_id;
} u;
- uint32_t reserved[VND_CMD_APP_RESERVED_SIZE];
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __packed;
+
+struct aen_complete_cmd {
+ struct app_id app_info;
+ port_id_t port_id;
+ uint32_t event_code;
+ uint8_t version;
+ uint8_t pad[VND_CMD_PAD_SIZE];
+ uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
} __packed;
#define RX_DELAY_DELETE_TIMEOUT 20
+#define FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN 1
+
#endif /* QLA_EDIF_BSG_H */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 073d06e88c58..f307beed9d29 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -807,7 +807,7 @@ struct els_entry_24xx {
#define EPD_ELS_COMMAND (0 << 13)
#define EPD_ELS_ACC (1 << 13)
#define EPD_ELS_RJT (2 << 13)
-#define EPD_RX_XCHG (3 << 13)
+#define EPD_RX_XCHG (3 << 13) /* terminate exchange */
#define ECF_CLR_PASSTHRU_PEND BIT_12
#define ECF_INCL_FRAME_HDR BIT_11
#define ECF_SEC_LOGIN BIT_3
@@ -1675,6 +1675,7 @@ struct qla_flt_location {
#define FLT_REG_VPD_SEC_27XX_1 0x52
#define FLT_REG_VPD_SEC_27XX_2 0xD8
#define FLT_REG_VPD_SEC_27XX_3 0xDA
+#define FLT_REG_NVME_PARAMS_27XX 0x21
/* 28xx */
#define FLT_REG_AUX_IMG_PRI_28XX 0x125
@@ -1691,6 +1692,8 @@ struct qla_flt_location {
#define FLT_REG_MPI_SEC_28XX 0xF0
#define FLT_REG_PEP_PRI_28XX 0xD1
#define FLT_REG_PEP_SEC_28XX 0xF1
+#define FLT_REG_NVME_PARAMS_PRI_28XX 0x14E
+#define FLT_REG_NVME_PARAMS_SEC_28XX 0x179
struct qla_flt_region {
__le16 code;
@@ -1706,7 +1709,7 @@ struct qla_flt_header {
__le16 length;
__le16 checksum;
__le16 unused;
- struct qla_flt_region region[0];
+ struct qla_flt_region region[];
};
#define FLT_REGION_SIZE 16
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 8d8503a28479..e3256e721be1 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -70,8 +70,6 @@ extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
-extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -163,6 +161,7 @@ extern int ql2xrdpenable;
extern int ql2xsmartsan;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
+extern int ql2xextended_error_logging_ktrace;
extern int ql2xiidmaenable;
extern int ql2xmqsupport;
extern int ql2xfwloadbin;
@@ -192,6 +191,7 @@ extern int ql2xfulldump_on_mpifail;
extern int ql2xsecenable;
extern int ql2xenforce_iocb_limit;
extern int ql2xabts_wait_nvme;
+extern u32 ql2xnvme_queues;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -276,7 +276,6 @@ extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *);
extern void qla2x00_sp_free_dma(srb_t *sp);
-extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int);
extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *);
@@ -316,7 +315,8 @@ extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern int qla2xxx_dif_start_scsi_mq(srb_t *);
-extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo);
+extern void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
+ void (*done)(struct srb *, int));
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
@@ -332,6 +332,8 @@ extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha,
struct qla_work_evt *e);
+void qla2x00_sp_release(struct kref *kref);
+void qla2x00_els_dcmd2_iocb_timeout(void *data);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -430,7 +432,8 @@ extern int
qla2x00_get_resource_cnts(scsi_qla_host_t *);
extern int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map);
+qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map,
+ u8 *num_entries);
extern int
qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
@@ -551,6 +554,10 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+extern int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *,
+ struct qla_dport_diag_v2 *, mbx_cmd_t *);
+
int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
@@ -605,7 +612,6 @@ void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **
/*
* Global Function Prototypes in qla_sup.c source file.
*/
-extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
uint32_t, uint32_t);
extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t,
@@ -724,7 +730,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
@@ -777,12 +783,6 @@ extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_delete_queues(struct scsi_qla_host *);
-extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
-extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
-extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
/* qlafx00 related functions */
extern int qlafx00_pci_config(struct scsi_qla_host *);
@@ -867,8 +867,6 @@ extern void qla82xx_init_flags(struct qla_hw_data *);
extern void qla82xx_set_drv_active(scsi_qla_host_t *);
extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
-extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
-extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int);
/* ISP 8021 IDC */
extern void qla82xx_clear_drv_active(struct qla_hw_data *);
@@ -890,7 +888,7 @@ extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
-extern char *qdev_state(uint32_t);
+extern const char *qdev_state(uint32_t);
extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
extern int qla82xx_read_temperature(scsi_qla_host_t *);
extern int qla8044_read_temperature(scsi_qla_host_t *);
@@ -986,7 +984,6 @@ fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id);
void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2,
fc_port_t *fcport);
void qla_edb_stop(scsi_qla_host_t *vha);
-ssize_t edif_doorbell_show(struct device *dev, struct device_attribute *attr, char *buf);
int32_t qla_edif_app_mgmt(struct bsg_job *bsg_job);
void qla_enode_init(scsi_qla_host_t *vha);
void qla_enode_stop(scsi_qla_host_t *vha);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 28b574e20ef3..64ab070b8716 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -529,7 +529,6 @@ static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
if (!e)
goto err2;
- del_timer(&sp->u.iocb_cmd.timer);
e->u.iosb.sp = sp;
qla2x00_post_work(vha, e);
return;
@@ -556,8 +555,8 @@ err2:
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
- sp->free(sp);
-
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
@@ -592,13 +591,15 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
if (!vha->flags.online)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rft_id";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
@@ -638,8 +639,6 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x.\n",
@@ -653,7 +652,8 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
}
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -676,8 +676,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
return (QLA_SUCCESS);
}
- return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
- FC4_TYPE_FCP_SCSI);
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
}
static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
@@ -688,13 +687,15 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rff_id";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
@@ -727,13 +728,11 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
ct_req->req.rff_id.fc4_feature = fc4feature;
- ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */
sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x feature %x type %x.\n",
@@ -749,7 +748,8 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -779,13 +779,15 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rnid";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
@@ -823,9 +825,6 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x portid %06x\n",
sp->name, sp->handle, d_id->b24);
@@ -840,7 +839,8 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -886,13 +886,15 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
srb_t *sp;
struct ct_sns_pkt *ct_sns;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "rsnn_nn";
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_sns_sp_done);
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
@@ -936,9 +938,6 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_sns_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - hdl=%x.\n",
sp->name, sp->handle);
@@ -953,7 +952,8 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -1596,7 +1596,6 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
struct ct_fdmi_hba_attr *eiter;
uint16_t alen;
@@ -1617,7 +1616,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
alen = scnprintf(
eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
- "%s", "QLogic Corporation");
+ "%s", QLA2XXX_MANUFACTURER);
alen += FDMI_ATTR_ALIGNMENT(alen);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -1758,15 +1757,15 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
/* MAX CT Payload Length */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
- eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2);
+
alen = sizeof(eiter->a.max_ct_len);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
size += alen;
ql_dbg(ql_dbg_disc, vha, 0x20aa,
"CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
- /* Node Sybolic Name */
+ /* Node Symbolic Name */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
@@ -1851,7 +1850,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
unsigned int callopt)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb24 = (void *)ha->init_cb;
struct new_utsname *p_sysid = utsname();
char *hostname = p_sysid ?
p_sysid->nodename : fc_host_system_hostname(vha->host);
@@ -1903,8 +1901,7 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
/* Max frame size. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
- eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ?
- icb24->frame_payload_size : ha->init_cb->frame_payload_size));
+ eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size);
alen = sizeof(eiter->a.max_frame_size);
alen += FDMI_ATTR_TYPELEN(eiter);
eiter->len = cpu_to_be16(alen);
@@ -2893,7 +2890,8 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
qla24xx_handle_gpsc_event(vha, &ea);
done:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
@@ -2905,6 +2903,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -2913,8 +2912,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gpsc_sp_done);
/* CT_IU preamble */
ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
@@ -2932,9 +2931,6 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla24xx_async_gpsc_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x205e,
"Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
sp->name, fcport->port_name, sp->handle,
@@ -2947,7 +2943,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -2996,7 +2993,8 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
break;
}
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
@@ -3135,13 +3133,15 @@ static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
if (res) {
if (res == QLA_FUNCTION_TIMEOUT) {
qla24xx_post_gpnid_work(sp->vha, &ea.id);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
} else if (sp->gen1) {
/* There was another RSCN for this Nport ID */
qla24xx_post_gpnid_work(sp->vha, &ea.id);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
@@ -3162,7 +3162,8 @@ static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
@@ -3182,6 +3183,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
if (!vha->flags.online)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
@@ -3190,14 +3192,16 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->name = "gpnid";
sp->u.iocb_cmd.u.ctarg.id = *id;
sp->gen1 = 0;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnid_sp_done);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry(tsp, &vha->gpnid_list, elem) {
if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
tsp->gen1++;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
goto done;
}
}
@@ -3238,9 +3242,6 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_gpnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x2067,
"Async-%s hdl=%x ID %3phC.\n", sp->name,
sp->handle, &ct_req->req.port_id.port_id);
@@ -3270,25 +3271,18 @@ done_free_sp:
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
-void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- qla24xx_post_gnl_work(vha, fcport);
-}
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
{
struct scsi_qla_host *vha = sp->vha;
fc_port_t *fcport = sp->fcport;
struct ct_sns_rsp *ct_rsp;
- struct event_arg ea;
uint8_t fc4_scsi_feat;
uint8_t fc4_nvme_feat;
@@ -3296,10 +3290,10 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
"Async done-%s res %x ID %x. %8phC\n",
sp->name, res, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
- ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp;
fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ sp->rc = res;
/*
* FC-GS-7, 5.2.3.12 FC-4 Features - format
@@ -3320,68 +3314,129 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
}
}
- memset(&ea, 0, sizeof(ea));
- ea.sp = sp;
- ea.fcport = sp->fcport;
- ea.rc = res;
+ if (sp->flags & SRB_WAKEUP_ON_COMP) {
+ complete(sp->comp);
+ } else {
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
- qla24xx_handle_gffid_event(vha, &ea);
- sp->free(sp);
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ /* we should not be here */
+ dump_stack();
+ }
}
/* Get FC4 Feature with Nport ID. */
-int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
srb_t *sp;
+ DECLARE_COMPLETION_ONSTACK(comp);
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ /* this routine has no handling for the no-wait case */
+ if (!vha->flags.online || !wait)
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gffid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gffid_sp_done);
+ sp->comp = &comp;
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
+
+ if (wait)
+ sp->flags = SRB_WAKEUP_ON_COMP;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns response.\n",
+ __func__);
+ goto done_free_sp;
+ }
/* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
- GFF_ID_RSP_SIZE);
+ ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE);
ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla24xx_async_gffid_sp_done;
-
- ql_dbg(ql_dbg_disc, vha, 0x2132,
- "Async-%s hdl=%x %8phC.\n", sp->name,
- sp->handle, fcport->port_name);
-
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+
+ if (rval != QLA_SUCCESS) {
+ rval = QLA_FUNCTION_FAILED;
goto done_free_sp;
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x3074,
+ "Async-%s hdl=%x portid %06x\n",
+ sp->name, sp->handle, fcport->d_id.b24);
+ }
+
+ wait_for_completion(sp->comp);
+ rval = sp->rc;
- return rval;
done_free_sp:
- sp->free(sp);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
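
qla24xx_async_gffid() is now synchronous-only: it allocates its own DMA-coherent CT buffers, parks on an on-stack completion, and frees everything on the way out. A hedged call-site sketch (the real callers are outside these hunks):

	/* blocks until qla24xx_async_gffid_sp_done() fires complete() */
	if (qla24xx_async_gffid(vha, fcport, true) == QLA_SUCCESS)
		qla24xx_fcport_handle_login(vha, fcport);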
@@ -3574,7 +3629,7 @@ login_logout:
do_delete) {
if (fcport->loop_id != FC_NO_LOOP_ID) {
if (fcport->flags & FCF_FCP2_DEVICE)
- fcport->logout_on_delete = 0;
+ continue;
ql_log(ql_log_warn, vha, 0x20f0,
"%s %d %8phC post del sess\n",
@@ -3767,7 +3822,6 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
"Async done-%s res %x FC4Type %x\n",
sp->name, res, sp->gen2);
- del_timer(&sp->u.iocb_cmd.timer);
sp->rc = res;
if (res) {
unsigned long flags;
@@ -3892,9 +3946,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->name = "gnnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnft_gnnft_sp_done);
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
@@ -3910,8 +3963,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gpnft_gnnft_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
sp->handle, ct_req->req.gpn_ft.port_type);
@@ -3938,8 +3989,8 @@ done_free_sp:
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
@@ -3991,9 +4042,12 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
"%s: Performing FCP Scan\n", __func__);
- if (sp)
- sp->free(sp); /* should not happen */
+ if (sp) {
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ }
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp) {
spin_lock_irqsave(&vha->work_lock, flags);
@@ -4038,6 +4092,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
+ /* ref: INIT */
qla2x00_rel_sp(sp);
return rval;
}
@@ -4057,9 +4112,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->name = "gpnft";
sp->gen1 = vha->hw->base_qpair->chip_reset;
sp->gen2 = fc4_type;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gpnft_gnnft_sp_done);
rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
@@ -4074,8 +4128,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gpnft_gnnft_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
sp->handle, ct_req->req.gpn_ft.port_type);
@@ -4103,7 +4155,8 @@ done_free_sp:
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
}
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
@@ -4167,7 +4220,8 @@ static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
qla24xx_handle_gnnid_event(vha, &ea);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
@@ -4180,6 +4234,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
@@ -4189,9 +4244,8 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->name = "gnnid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gnnid_sp_done);
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
@@ -4210,8 +4264,6 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gnnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
sp->name, fcport->port_name,
@@ -4223,7 +4275,8 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
@@ -4297,7 +4350,8 @@ static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
qla24xx_handle_gfpnid_event(vha, &ea);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
@@ -4309,6 +4363,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
@@ -4317,9 +4372,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->name = "gfpnid";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_gfpnid_sp_done);
/* CT_IU preamble */
ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
@@ -4338,8 +4392,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
- sp->done = qla2x00_async_gfpnid_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
sp->name, fcport->port_name,
@@ -4352,7 +4404,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 070b636802d0..e12db95de688 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -47,10 +47,20 @@ qla2x00_sp_timeout(struct timer_list *t)
{
srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
+ scsi_qla_host_t *vha = sp->vha;
WARN_ON(irqs_disabled());
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
+
+ /* ref: TMR */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+
+ if (vha && qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9008,
+ "PCI/Register disconnect.\n");
+ qla_pci_set_eeh_busy(vha);
+ }
}
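
After an IOCB timeout the handler now probes for a dead PCI device and flips the EEH-busy state. qla2x00_isp_reg_stat() is defined outside this hunk; in essence it looks for the all-ones read of a surprise-removed function (sketch, not verbatim):

	static bool isp_regs_dead(struct device_reg_24xx __iomem *reg)
	{
		/* reads of an unplugged PCI function return all ones */
		return rd_reg_dword(&reg->host_status) == 0xffffffff;
	}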
void qla2x00_sp_free(srb_t *sp)
@@ -125,8 +135,13 @@ static void qla24xx_abort_iocb_timeout(void *data)
}
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- if (sp->cmd_sp)
+ if (sp->cmd_sp) {
+ /*
+ * This done function should take care of
+ * original command ref: INIT
+ */
sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
+ }
abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
sp->done(sp, QLA_OS_TIMER_EXPIRED);
@@ -140,11 +155,11 @@ static void qla24xx_abort_sp_done(srb_t *sp, int res)
if (orig_sp)
qla_wait_nvme_release_cmd_kref(orig_sp);
- del_timer(&sp->u.iocb_cmd.timer);
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&abt->u.abt.comp);
else
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
@@ -153,12 +168,15 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
struct srb_iocb *abt_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
+ /* ref: INIT for ABTS command */
sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
GFP_ATOMIC);
if (!sp)
return QLA_MEMORY_ALLOC_FAILED;
+ QLA_VHA_MARK_BUSY(vha, bail);
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
@@ -167,23 +185,22 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
- abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
/* FW can send the ABTS twice, 20s timeout each; allow 42s */
- qla2x00_init_timer(sp, 42);
+ qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
+ sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
- sp->done = qla24xx_abort_sp_done;
-
ql_dbg(ql_dbg_async, vha, 0x507c,
"Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
cmd_sp->type);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
@@ -191,7 +208,8 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
wait_for_completion(&abt_iocb->u.abt.comp);
rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
QLA_SUCCESS : QLA_ERR_FROM_FW;
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
return rval;
@@ -286,10 +304,13 @@ static void qla2x00_async_login_sp_done(srb_t *sp, int res)
ea.iop[0] = lio->u.logio.iop[0];
ea.iop[1] = lio->u.logio.iop[1];
ea.sp = sp;
+ if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_plogi_done_event(vha, &ea);
}
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
@@ -308,6 +329,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
}
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -320,12 +342,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->name = "login";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_login_sp_done);
lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_login_sp_done;
if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
} else {
@@ -358,7 +378,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
@@ -370,29 +391,26 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
sp->fcport->login_gen++;
qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_LOGOUT_CMD;
sp->name = "logout";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_logout_sp_done;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_logout_sp_done);
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
@@ -406,7 +424,8 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
return rval;
@@ -432,29 +451,26 @@ static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
if (!test_bit(UNLOADING, &vha->dpc_flags))
qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
lio->u.logio.data);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *lio;
int rval;
rval = QLA_FUNCTION_FAILED;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_PRLO_CMD;
sp->name = "prlo";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_prlo_sp_done;
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_prlo_sp_done);
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
@@ -468,7 +484,8 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
@@ -551,10 +568,12 @@ static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
ea.iop[1] = lio->u.logio.iop[1];
ea.fcport = sp->fcport;
ea.sp = sp;
+ if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_adisc_event(vha, &ea);
-
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
@@ -565,26 +584,34 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
struct srb_iocb *lio;
int rval = QLA_FUNCTION_FAILED;
+ if (IS_SESSION_DELETED(fcport)) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC is being delete - not sending command.\n",
+ __func__, fcport->port_name);
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
+ return rval;
+ }
+
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
fcport->flags |= FCF_ASYNC_SENT;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_ADISC_CMD;
sp->name = "adisc";
-
- lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_adisc_sp_done);
- sp->done = qla2x00_async_adisc_sp_done;
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
+ lio = &sp->u.iocb_cmd;
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
+ }
ql_dbg(ql_dbg_disc, vha, 0x206f,
"Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
@@ -597,7 +624,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_post_async_adisc_work(vha, fcport, data);
@@ -963,6 +991,9 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
}
break;
+ case ISP_CFG_NL:
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
default:
break;
}
@@ -1078,13 +1109,13 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
- struct srb_iocb *mbx;
int rval = QLA_FUNCTION_FAILED;
unsigned long flags;
u16 *mb;
@@ -1109,6 +1140,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
vha->gnl.sent = 1;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -1117,10 +1149,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gnl_sp_done);
mb = sp->u.iocb_cmd.u.mbx.out_mb;
mb[0] = MBC_PORT_NODE_NAME_LIST;
@@ -1132,8 +1162,6 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
mb[8] = vha->gnl.size;
mb[9] = vha->vp_idx;
- sp->done = qla24xx_async_gnl_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x20da,
"Async-%s - OUT WWPN %8phC hndl %x\n",
sp->name, fcport->port_name, sp->handle);
@@ -1145,7 +1173,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
return rval;
@@ -1191,7 +1220,7 @@ done:
dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
sp->u.iocb_cmd.u.mbx.in_dma);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
@@ -1232,11 +1261,13 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
ea.sp = sp;
if (res == QLA_OS_TIMER_EXPIRED)
ea.data[0] = QLA_OS_TIMER_EXPIRED;
+ else if (res)
+ ea.data[0] = MBS_COMMAND_ERROR;
qla24xx_handle_prli_done_event(vha, &ea);
}
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int
@@ -1269,12 +1300,10 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
sp->type = SRB_PRLI_CMD;
sp->name = "prli";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_prli_sp_done);
lio = &sp->u.iocb_cmd;
- lio->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- sp->done = qla2x00_async_prli_sp_done;
lio->u.logio.flags = 0;
if (NVME_TARGET(vha->hw, fcport))
@@ -1296,7 +1325,8 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
@@ -1325,14 +1355,21 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
- fcport->loop_id == FC_NO_LOOP_ID) {
+ if (IS_SESSION_DELETED(fcport)) {
ql_log(ql_log_warn, vha, 0xffff,
- "%s: %8phC - not sending command.\n",
- __func__, fcport->port_name);
+ "%s: %8phC is being delete - not sending command.\n",
+ __func__, fcport->port_name);
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return rval;
}
+ if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: %8phC online %d flags %x - not sending command.\n",
+ __func__, fcport->port_name, vha->flags.online, fcport->flags);
+ goto done;
+ }
+
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -1344,10 +1381,8 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
sp->gen2 = fcport->login_gen;
-
- mbx = &sp->u.iocb_cmd;
- mbx->timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla24xx_async_gpdb_sp_done);
pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
@@ -1366,11 +1401,10 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
mb[9] = vha->vp_idx;
mb[10] = opt;
- mbx->u.mbx.in = pd;
+ mbx = &sp->u.iocb_cmd;
+ mbx->u.mbx.in = (void *)pd;
mbx->u.mbx.in_dma = pd_dma;
- sp->done = qla24xx_async_gpdb_sp_done;
-
ql_dbg(ql_dbg_disc, vha, 0x20dc,
"Async-%s %8phC hndl %x opt %x\n",
sp->name, fcport->port_name, sp->handle, opt);
@@ -1384,7 +1418,7 @@ done_free_sp:
if (pd)
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
@@ -1455,7 +1489,6 @@ static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport,
ql_dbg(ql_dbg_disc, vha, 0x20ef,
"%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
__func__, __LINE__, fcport->port_name);
- fcport->edif.app_started = 1;
fcport->edif.app_sess_online = 1;
qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
@@ -1556,6 +1589,11 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
u8 login = 0;
int rc;
+ ql_dbg(ql_dbg_disc, vha, 0x307b,
+ "%s %8phC DS %d LS %d lid %d retries=%d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
+
if (qla_tgt_mode_enabled(vha))
return;
@@ -1614,7 +1652,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->login_gen, fcport->loop_id, fcport->scan_state,
fcport->fc4_type);
- if (fcport->scan_state != QLA_FCPORT_FOUND)
+ if (fcport->scan_state != QLA_FCPORT_FOUND ||
+ fcport->disc_state == DSC_DELETE_PEND)
return 0;
if ((fcport->loop_id != FC_NO_LOOP_ID) &&
@@ -1635,7 +1674,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
return 0;
- if (fcport->flags & FCF_ASYNC_SENT) {
+ if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
}
@@ -1732,8 +1771,16 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_PEND:
- if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ if (vha->hw->flags.edif_enabled)
+ break;
+
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post %s PRLI\n",
+ __func__, __LINE__, fcport->port_name,
+ NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
qla24xx_post_prli_work(vha, fcport);
+ }
break;
case DSC_UPD_FCPORT:
@@ -1787,7 +1834,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_PORT_ADDR:
fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
if (fcport) {
- if (fcport->flags & FCF_FCP2_DEVICE) {
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, vha, 0x2115,
"Delaying session delete for FCP2 portid=%06x %8phC ",
fcport->d_id.b24, fcport->port_name);
@@ -1819,7 +1867,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_AREA_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
@@ -1830,7 +1879,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_DOM_ADDR:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
@@ -1842,7 +1892,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_FAB_ADDR:
default:
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->flags & FCF_FCP2_DEVICE)
+ if (fcport->flags & FCF_FCP2_DEVICE &&
+ atomic_read(&fcport->state) == FCS_ONLINE)
continue;
fcport->scan_needed = 1;
@@ -1969,23 +2020,24 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ uint8_t bail;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- tm_iocb = &sp->u.iocb_cmd;
+ QLA_VHA_MARK_BUSY(vha, bail);
sp->type = SRB_TM_CMD;
sp->name = "tmf";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
+ qla2x00_tmf_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
- tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
+ tm_iocb = &sp->u.iocb_cmd;
init_completion(&tm_iocb->u.tmf.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
-
tm_iocb->u.tmf.flags = flags;
tm_iocb->u.tmf.lun = lun;
- tm_iocb->u.tmf.data = tag;
- sp->done = qla2x00_tmf_sp_done;
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
@@ -2015,7 +2067,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
}
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
@@ -2074,13 +2127,6 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
default:
- if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
- (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
- break;
- }
-
sp = ea->sp;
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC priority %s, fc4type %x prev try %s\n",
@@ -2100,6 +2146,13 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
}
if (N2N_TOPO(vha->hw)) {
+ if (ea->fcport->n2n_link_reset_cnt ==
+ vha->hw->login_retry_count &&
+ ea->fcport->flags & FCF_FCSP_DEVICE) {
+ /* remote authentication app just started */
+ ea->fcport->n2n_link_reset_cnt = 0;
+ }
+
if (ea->fcport->n2n_link_reset_cnt <
vha->hw->login_retry_count) {
ea->fcport->n2n_link_reset_cnt++;
@@ -2224,12 +2277,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[1]);
- ea->fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED);
- if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- else
- qla2x00_mark_device_lost(vha, ea->fcport, 1);
+ qlt_schedule_sess_for_deletion(ea->fcport);
break;
case MBS_LOOP_ID_USED:
/* data[1] = IO PARAM 1 = nport ID */
@@ -3472,6 +3520,14 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
struct rsp_que *rsp = ha->rsp_q_map[0];
struct qla2xxx_fw_dump *fw_dump;
+ if (ha->fw_dump) {
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "Firmware dump already allocated.\n");
+ return;
+ }
+
+ ha->fw_dumped = 0;
+ ha->fw_dump_cap_flags = 0;
dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
req_q_size = rsp_q_size = 0;
@@ -3482,7 +3538,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x11000 + 1) *
sizeof(uint16_t);
} else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ if (IS_QLA83XX(ha))
fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
else if (IS_QLA81XX(ha))
fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -3494,8 +3550,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mem_size = (ha->fw_memory_size - 0x100000 + 1) *
sizeof(uint32_t);
if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
- !IS_QLA28XX(ha))
+ if (!IS_QLA83XX(ha))
mq_size = sizeof(struct qla2xxx_mq_chain);
/*
* Allocate maximum buffer size for all queues - Q0.
@@ -4056,8 +4111,7 @@ enable_82xx_npiv:
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) {
+ if (IS_QLA83XX(ha)) {
ha->flags.fac_supported = 0;
rval = QLA_SUCCESS;
}
@@ -4484,6 +4538,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
BIT_6) != 0;
ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
+ /* Init_cb will be reused for other command(s). Save a backup copy of port_name */
+ memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
}
/* ELS pass through payload is limit by frame size. */
@@ -5248,9 +5304,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
- if (vha->e_dbell.db_flags == EDB_ACTIVE)
- fcport->edif.app_started = 1;
-
spin_lock_init(&fcport->edif.indx_list_lock);
INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
@@ -5463,6 +5516,22 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
+static void
+qla_reinitialize_link(scsi_qla_host_t *vha)
+{
+ int rval;
+
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ rval = qla2x00_full_login_lip(vha);
+ if (rval == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xd051,
+ "Link reinitialization failed (%d)\n", rval);
+ }
+}
+
/*
* qla2x00_configure_local_loop
* Updates Fibre Channel Device Database with local loop devices.
@@ -5514,6 +5583,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&vha->work_lock, flags);
if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ u8 loop_map_entries = 0;
+ int rc;
+
+ rc = qla2x00_get_fcal_position_map(vha, NULL,
+ &loop_map_entries);
+ if (rc == QLA_SUCCESS && loop_map_entries > 1) {
+ /*
+ * There are devices that are still not logged
+ * in. Reinitialize to give them a chance.
+ */
+ qla_reinitialize_link(vha);
+ return QLA_FUNCTION_FAILED;
+ }
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
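
The hunk above only forces a link reinitialization when the FC-AL position map
reports more than one entry, i.e. when remote devices exist but have not yet
logged in. A minimal sketch of that decision, assuming only the two helpers
this patch introduces (qla2x00_get_fcal_position_map() with its new
num_entries out-parameter, and qla_reinitialize_link()); the wrapper name is
hypothetical:

    static int qla_local_loop_needs_reinit(scsi_qla_host_t *vha)
    {
            u8 entries = 0;

            /* Pass a NULL map buffer: only the entry count is needed. */
            if (qla2x00_get_fcal_position_map(vha, NULL, &entries) ==
                QLA_SUCCESS && entries > 1) {
                    /* Devices present but not logged in; bounce the link. */
                    qla_reinitialize_link(vha);
                    return 1;       /* caller should retry the scan */
            }
            return 0;
    }
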
@@ -5602,6 +5684,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
fcport->scan_state = QLA_FCPORT_FOUND;
+ if (fcport->login_retry == 0) {
+ fcport->login_retry = vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0x2135,
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id,
+ fcport->login_retry);
+ }
found++;
break;
}
@@ -5828,13 +5917,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_dfs_create_rport(vha, fcport);
- if (NVME_TARGET(vha->hw, fcport)) {
- qla_nvme_register_remote(vha, fcport);
- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- return;
- }
-
qla24xx_update_fcport_fcp_prio(vha, fcport);
switch (vha->host->active_mode) {
@@ -5856,6 +5938,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ if (NVME_TARGET(vha->hw, fcport))
+ qla_nvme_register_remote(vha, fcport);
+
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
@@ -6777,29 +6862,6 @@ __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
return rval;
}
-static const char *
-qla83xx_dev_state_to_string(uint32_t dev_state)
-{
- switch (dev_state) {
- case QLA8XXX_DEV_COLD:
- return "COLD/RE-INIT";
- case QLA8XXX_DEV_INITIALIZING:
- return "INITIALIZING";
- case QLA8XXX_DEV_READY:
- return "READY";
- case QLA8XXX_DEV_NEED_RESET:
- return "NEED RESET";
- case QLA8XXX_DEV_NEED_QUIESCENT:
- return "NEED QUIESCENT";
- case QLA8XXX_DEV_FAILED:
- return "FAILED";
- case QLA8XXX_DEV_QUIESCENT:
- return "QUIESCENT";
- default:
- return "Unknown";
- }
-}
-
/* Assumes idc-lock always held on entry */
void
qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
@@ -6853,9 +6915,8 @@ qla83xx_initiating_reset(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
} else {
- const char *state = qla83xx_dev_state_to_string(dev_state);
-
- ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);
+ ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
+ qdev_state(dev_state));
/* SV: XXX: Is timeout required here? */
/* Wait for IDC state change READY -> NEED_RESET */
@@ -7192,6 +7253,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
+ vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
+
if (vha->hw->flags.port_isolated)
return status;
@@ -7869,6 +7933,9 @@ qla28xx_component_status(
active_regions->aux.npiv_config_2_3 =
qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
+
+ active_regions->aux.nvme_params =
+ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS);
}
static int
@@ -7977,11 +8044,12 @@ check_valid_image:
}
ql_dbg(ql_dbg_init, vha, 0x018f,
- "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
+ "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n",
active_regions->aux.board_config,
active_regions->aux.vpd_nvram,
active_regions->aux.npiv_config_0_1,
- active_regions->aux.npiv_config_2_3);
+ active_regions->aux.npiv_config_2_3,
+ active_regions->aux.nvme_params);
}
void
@@ -9394,7 +9462,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->rsp->req = qpair->req;
qpair->rsp->qpair = qpair;
/* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(qpair, smp_processor_id());
+ qla_cpu_update(qpair, raw_smp_processor_id());
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
@@ -9652,6 +9720,12 @@ int qla2xxx_disable_port(struct Scsi_Host *host)
vha->hw->flags.port_isolated = 1;
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9006,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
if (qla2x00_chip_is_down(vha))
return 0;
@@ -9667,6 +9741,13 @@ int qla2xxx_enable_port(struct Scsi_Host *host)
{
scsi_qla_host_t *vha = shost_priv(host);
+ if (qla2x00_isp_reg_stat(vha->hw)) {
+ ql_log(ql_log_info, vha, 0x9001,
+ "PCI/Register disconnect, exiting.\n");
+ qla_pci_set_eeh_busy(vha);
+ return FAILED;
+ }
+
vha->hw->flags.port_isolated = 0;
/* Set the flag to 1, so that isp_abort can proceed */
vha->flags.online = 1;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 5f3b7995cc8f..db17f7f410cd 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -184,6 +184,8 @@ static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
sp->vha = vha;
sp->qpair = qpair;
sp->cmd_type = TYPE_SRB;
+ /* ref: INIT - normal flow */
+ kref_init(&sp->cmd_kref);
INIT_LIST_HEAD(&sp->elem);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ed604f2185bf..42ce4e1fe744 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2560,11 +2560,38 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
}
}
-void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
+static void
+qla2x00_async_done(struct srb *sp, int res)
+{
+ if (del_timer(&sp->u.iocb_cmd.timer)) {
+ /*
+ * Successfully cancelled the timeout handler
+ * ref: TMR
+ */
+ if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
+ return;
+ }
+ sp->async_done(sp, res);
+}
+
+void
+qla2x00_sp_release(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+
+ sp->free(sp);
+}
+
+void
+qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
+ void (*done)(struct srb *sp, int res))
{
timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
- sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
+ sp->done = qla2x00_async_done;
+ sp->async_done = done;
sp->free = qla2x00_sp_free;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
sp->start_timer = 1;
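
Taken together, qla2x00_sp_release(), qla2x00_async_done() and
qla2x00_init_async_sp() replace the bare sp->free()/sp->done() callbacks with
a reference-counted lifecycle: an "INIT" reference owned by the issuing path
and a "TMR" reference owned by the armed timer. A hedged sketch of the
resulting pairing, using only the <linux/kref.h> primitives visible in this
patch:

    /* INIT ref: taken in qla2xxx_init_sp() for the normal flow. */
    kref_init(&sp->cmd_kref);                       /* count = 1 */

    /* TMR ref: taken in qla2x00_start_sp() just before arming. */
    kref_get(&sp->cmd_kref);                        /* count = 2 */
    add_timer(&sp->u.iocb_cmd.timer);

    /* Completion: whoever cancels the pending timer drops TMR. */
    if (del_timer(&sp->u.iocb_cmd.timer))
            kref_put(&sp->cmd_kref, qla2x00_sp_release);    /* TMR */

    /* The issuing path drops INIT on success and error paths alike. */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);            /* INIT */
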
@@ -2651,7 +2678,9 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
return -ENOMEM;
}
- /* Alloc SRB structure */
+ /* Alloc SRB structure
+ * ref: INIT
+ */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
kfree(fcport);
@@ -2672,18 +2701,19 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
- elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
- init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
- sp->done = qla2x00_els_dcmd_sp_done;
+ qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
+ qla2x00_els_dcmd_sp_done);
sp->free = qla2x00_els_dcmd_sp_free;
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
+ init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
GFP_KERNEL);
if (!elsio->u.els_logo.els_logo_pyld) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return QLA_FUNCTION_FAILED;
}
@@ -2706,7 +2736,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return QLA_FUNCTION_FAILED;
}
@@ -2717,7 +2748,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
wait_for_completion(&elsio->u.els_logo.comp);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
@@ -2787,7 +2819,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
sp->vha->qla_stats.control_requests++;
}
-static void
+void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
srb_t *sp = data;
@@ -2850,7 +2882,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
- del_timer(&sp->u.iocb_cmd.timer);
+ /* For edif, set logout on delete to ensure any residual key from FW is flushed. */
+ fcport->logout_on_delete = 1;
+ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
@@ -2927,6 +2961,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ break;
}
fallthrough;
default:
@@ -2936,9 +2971,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
fw_status[0], fw_status[1], fw_status[2]);
fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(fcport,
- DSC_LOGIN_FAILED);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qlt_schedule_sess_for_deletion(fcport);
break;
}
break;
@@ -2950,8 +2983,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
fw_status[0], fw_status[1], fw_status[2]);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qlt_schedule_sess_for_deletion(fcport);
break;
}
@@ -2960,7 +2992,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
struct srb_iocb *elsio = &sp->u.iocb_cmd;
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return;
}
e->u.iosb.sp = sp;
@@ -2978,7 +3011,9 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
int rval = QLA_SUCCESS;
void *ptr, *resp_ptr;
- /* Alloc SRB structure */
+ /* Alloc SRB structure
+ * ref: INIT
+ */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
@@ -2993,17 +3028,16 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dbg(ql_dbg_io, vha, 0x3073,
"%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
- sp->type = SRB_ELS_DCMD;
- sp->name = "ELS_DCMD";
- sp->fcport = fcport;
-
- elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
- qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
+ sp->type = SRB_ELS_DCMD;
+ sp->name = "ELS_DCMD";
+ sp->fcport = fcport;
+ qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
+ qla2x00_els_dcmd2_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
- sp->done = qla2x00_els_dcmd2_sp_done;
elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
ptr = elsio->u.els_plogi.els_plogi_pyld =
@@ -3068,7 +3102,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
out:
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
@@ -3879,8 +3914,15 @@ qla2x00_start_sp(srb_t *sp)
break;
}
- if (sp->start_timer)
+ if (sp->start_timer) {
+ /* ref: TMR timer ref.
+ * This code should sit just before the start_iocbs call,
+ * so that the caller does not have to do a kref_put even
+ * on failure.
+ */
+ kref_get(&sp->cmd_kref);
add_timer(&sp->u.iocb_cmd.timer);
+ }
wmb();
qla2x00_start_iocbs(vha, qp->req);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index aaf6504570fd..e19fde304e5c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -49,11 +49,11 @@ qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
}
const char *const port_state_str[] = {
- "Unknown",
- "UNCONFIGURED",
- "DEAD",
- "LOST",
- "ONLINE"
+ [FCS_UNKNOWN] = "Unknown",
+ [FCS_UNCONFIGURED] = "UNCONFIGURED",
+ [FCS_DEVICE_DEAD] = "DEAD",
+ [FCS_DEVICE_LOST] = "LOST",
+ [FCS_ONLINE] = "ONLINE"
};
static void
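
Indexing port_state_str[] with designated initializers ties each label to its
FCS_* value, so reordering or extending the enum can no longer silently shift
the strings. The same idiom in miniature (hypothetical names, not from the
driver):

    enum hba_state { ST_UNKNOWN, ST_LOST, ST_ONLINE };

    /* Positional initialization would break if the enum were
     * reordered; designated initialization stays correct. */
    static const char *const hba_state_str[] = {
            [ST_UNKNOWN] = "Unknown",
            [ST_LOST]    = "LOST",
            [ST_ONLINE]  = "ONLINE",
    };
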
@@ -1354,9 +1354,7 @@ skip_rio:
if (!vha->vp_idx) {
if (ha->flags.fawwpn_enabled &&
(ha->current_topology == ISP_CFG_F)) {
- void *wwpn = ha->init_cb->port_name;
-
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+ memcpy(vha->port_name, ha->port_name, WWN_SIZE);
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
@@ -1761,6 +1759,9 @@ global_port_update:
break;
case MBA_DPORT_DIAGNOSTICS:
+ if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
+ (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
+ vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
ql_dbg(ql_dbg_async, vha, 0x5052,
"D-Port Diagnostics: %04x %04x %04x %04x\n",
mb[0], mb[1], mb[2], mb[3]);
@@ -2245,9 +2246,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
res = DID_ERROR << 16;
}
- if (logit) {
- if (sp->remap.remapped &&
- ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (sp->remap.remapped &&
+ ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
+ if (logit) {
ql_dbg(ql_dbg_user, vha, 0x503f,
"%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
type, sp->handle, comp_status);
@@ -2259,18 +2260,24 @@ qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
pkt)->total_byte_count),
e->s_id[0], e->s_id[2], e->s_id[1],
e->d_id[2], e->d_id[1], e->d_id[0]);
- } else {
- ql_log(ql_log_info, vha, 0x503f,
- "%s IOCB Done hdl=%x comp_status=0x%x\n",
- type, sp->handle, comp_status);
- ql_log(ql_log_info, vha, 0x503f,
- "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
- fw_status[1], fw_status[2],
- le32_to_cpu(((struct els_sts_entry_24xx *)
- pkt)->total_byte_count),
- e->s_id[0], e->s_id[2], e->s_id[1],
- e->d_id[2], e->d_id[1], e->d_id[0]);
}
+ if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
+ sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s rcv reject. Sched delete\n", __func__);
+ qlt_schedule_sess_for_deletion(sp->fcport);
+ }
+ } else if (logit) {
+ ql_log(ql_log_info, vha, 0x503f,
+ "%s IOCB Done hdl=%x comp_status=0x%x\n",
+ type, sp->handle, comp_status);
+ ql_log(ql_log_info, vha, 0x503f,
+ "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
+ fw_status[1], fw_status[2],
+ le32_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count),
+ e->s_id[0], e->s_id[2], e->s_id[1],
+ e->d_id[2], e->d_id[1], e->d_id[0]);
}
}
goto els_ct_done;
@@ -2498,6 +2505,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
iocb->u.tmf.data = QLA_FUNCTION_FAILED;
} else if ((le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
+ host_to_fcp_swap(sts->data, sizeof(sts->data));
if (le32_to_cpu(sts->rsp_data_len) < 4) {
ql_log(ql_log_warn, fcport->vha, 0x503b,
"Async-%s error - hdl=%x not enough response(%d).\n",
@@ -2638,7 +2646,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (unlikely(logit))
- ql_log(ql_dbg_io, fcport->vha, 0x5060,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -3425,6 +3433,7 @@ check_scsi_status:
case CS_PORT_UNAVAILABLE:
case CS_TIMEOUT:
case CS_RESET:
+ case CS_EDIF_INV_REQ:
/*
* We are going to have the fc class block the rport
@@ -3495,7 +3504,7 @@ check_scsi_status:
out:
if (logit)
- ql_log(ql_dbg_io, fcport->vha, 0x3022,
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
@@ -3711,12 +3720,11 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
* Return: 0 all iocbs has arrived, xx- all iocbs have not arrived.
*/
static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
- struct rsp_que *rsp, response_t *pkt)
+ struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
{
- int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
- response_t *end_pkt;
+ int start_pkt_ring_index;
+ u32 iocb_cnt = 0;
int rc = 0;
- u32 rsp_q_in;
if (pkt->entry_count == 1)
return rc;
@@ -3727,34 +3735,18 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
else
start_pkt_ring_index = rsp->ring_index - 1;
- if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
- rsp->length - 1;
+ if (rsp_q_in < start_pkt_ring_index)
+ /* q in ptr is wrapped */
+ iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
else
- end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
-
- end_pkt = rsp->ring + end_pkt_ring_index;
+ iocb_cnt = rsp_q_in - start_pkt_ring_index;
- /* next pkt = end_pkt + 1 */
- n_ring_index = end_pkt_ring_index + 1;
- if (n_ring_index >= rsp->length)
- n_ring_index = 0;
-
- rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
- rd_reg_dword(rsp->rsp_q_in);
-
- /* rsp_q_in is either wrapped or pointing beyond endpkt */
- if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
- rsp_q_in >= n_ring_index)
- /* all IOCBs arrived. */
- rc = 0;
- else
+ if (iocb_cnt < pkt->entry_count)
rc = -EIO;
- ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
- "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
- __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
- rsp_q_in, rc);
+ ql_dbg(ql_dbg_init, vha, 0x5091,
+ "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
+ __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
return rc;
}
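
The rewritten qla_chk_cont_iocb_avail() reduces the old end-pointer
bookkeeping to a single modular distance: how many entries lie between the
first entry of this packet and the queue in-pointer, accounting for wrap. The
core arithmetic as a standalone helper (a sketch; the helper name is not in
the patch):

    /* Entries from 'start' up to (not including) 'in' on a ring of
     * 'len' slots; assumes 0 <= start, in < len. */
    static inline u32 ring_distance(u32 start, u32 in, u32 len)
    {
            return (in >= start) ? in - start : len - start + in;
    }

    /* All IOCBs of a multi-entry packet have arrived iff the distance
     * covers the packet's entry count:
     *   rc = ring_distance(start, rsp_q_in, rsp->length) <
     *        pkt->entry_count ? -EIO : 0;
     */
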
@@ -3771,6 +3763,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
+ u16 rsp_in = 0, cur_ring_index;
+ int is_shadow_hba;
if (!ha->flags.fw_started)
return;
@@ -3780,8 +3774,20 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
qla_cpu_update(rsp->qpair, smp_processor_id());
}
- while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
+#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
+ do { \
+ _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
+ rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
+ } while (0)
+
+ is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
+
+ __update_rsp_in(is_shadow_hba, rsp, rsp_in);
+
+ while (rsp->ring_index != rsp_in &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
+ cur_ring_index = rsp->ring_index;
rsp->ring_index++;
if (rsp->ring_index == rsp->length) {
@@ -3893,6 +3899,7 @@ process_err:
}
pure_item = qla27xx_copy_fpin_pkt(vha,
(void **)&pkt, &rsp);
+ __update_rsp_in(is_shadow_hba, rsp, rsp_in);
if (!pure_item)
break;
qla24xx_queue_purex_item(vha, pure_item,
@@ -3900,7 +3907,17 @@ process_err:
break;
case ELS_AUTH_ELS:
- if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ /*
+ * ring_ptr and ring_index were pre-incremented
+ * above. Reset them back to the current entry and
+ * wait for the next interrupt, by which time all
+ * IOCBs should have arrived, then re-process.
+ */
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
ql_dbg(ql_dbg_init, vha, 0x5091,
"Defer processing ELS opcode %#x...\n",
purex_entry->els_frame_payload[3]);
@@ -4419,16 +4436,12 @@ msix_register_fail:
}
/* Enable MSI-X vector for response queue update for queue 0 */
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (ha->msixbase && ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
- } else
- if (ha->mqiobase &&
- (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
- ql2xmqsupport))
- ha->mqenable = 1;
+ if (IS_MQUE_CAPABLE(ha) &&
+ (ha->msixbase && ha->mqiobase && ha->max_qpairs))
+ ha->mqenable = 1;
+ else
+ ha->mqenable = 0;
+
ql_dbg(ql_dbg_multiq, vha, 0xc005,
"mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 10d2655ef676..359595a64664 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -9,6 +9,12 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+#ifdef CONFIG_PPC
+#define IS_PPCARCH true
+#else
+#define IS_PPCARCH false
+#endif
+
static struct mb_cmd_name {
uint16_t cmd;
const char *str;
@@ -232,6 +238,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1112,
"mbox[%d]<-0x%04x\n", cnt, *iptr);
wrt_reg_word(optr, *iptr);
+ } else {
+ wrt_reg_word(optr, 0);
}
mboxes >>= 1;
@@ -268,6 +276,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117a,
+ "cmd=%x Timeout.\n", command);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
if (chip_reset != ha->chip_reset) {
eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -280,12 +294,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_ABORTED;
goto premature_exit;
}
- ql_dbg(ql_dbg_mbx, vha, 0x117a,
- "cmd=%x Timeout.\n", command);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
eeh_delay = ha->flags.eeh_busy ? 1 : 0;
@@ -689,7 +697,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t *mcp = &mc;
u8 semaphore = 0;
#define EXE_FW_FORCE_SEMAPHORE BIT_7
- u8 retry = 3;
+ u8 retry = 5;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
"Entered %s.\n", __func__);
@@ -728,6 +736,9 @@ again:
vha->min_supported_speed =
nv->min_supported_speed;
}
+
+ if (IS_PPCARCH)
+ mcp->mb[11] |= BIT_4;
}
if (ha->flags.exlogins_enabled)
@@ -764,6 +775,12 @@ again:
goto again;
}
+ if (retry) {
+ retry--;
+ ql_dbg(ql_dbg_async, vha, 0x509d,
+ "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
+ goto again;
+ }
ql_dbg(ql_dbg_mbx, vha, 0x1026,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
vha->hw_err_cnt++;
@@ -3029,8 +3046,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
ha->orig_fw_iocb_count = mcp->mb[10];
if (ha->flags.npiv_supported)
ha->max_npiv_vports = mcp->mb[11];
- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha))
+ if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
ha->fw_max_fcf_count = mcp->mb[12];
}
@@ -3052,7 +3068,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
* Kernel context.
*/
int
-qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
+qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
+ u8 *num_entries)
{
int rval;
mbx_cmd_t mc;
@@ -3092,6 +3109,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
+ if (num_entries)
+ *num_entries = pmap[0];
}
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
@@ -5621,7 +5640,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_2|MBX_1|MBX_0;
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
- mcp->in_mb |= MBX_3;
+ mcp->in_mb |= MBX_4|MBX_3;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -6457,6 +6476,54 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+int
+qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
+ struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
+{
+ int rval;
+ dma_addr_t dd_dma;
+ uint size = sizeof(dd->buf);
+ uint16_t options = dd->options;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
+ "Entered %s.\n", __func__);
+
+ dd_dma = dma_map_single(&vha->hw->pdev->dev,
+ dd->buf, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
+ ql_log(ql_log_warn, vha, 0x1194,
+ "Failed to map dma buffer.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memset(dd->buf, 0, size);
+
+ mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
+ mcp->mb[1] = options;
+ mcp->mb[2] = MSW(LSD(dd_dma));
+ mcp->mb[3] = LSW(LSD(dd_dma));
+ mcp->mb[6] = MSW(MSD(dd_dma));
+ mcp->mb[7] = LSW(MSD(dd_dma));
+ mcp->mb[8] = size;
+ mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
+ mcp->buf_size = size;
+ mcp->flags = MBX_DMA_IN;
+ mcp->tov = MBX_TOV_SECONDS * 4;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
+ "Done %s.\n", __func__);
+ }
+
+ dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
+
+ return rval;
+}
+
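
qla26xx_dport_diagnostics_v2() is also a compact instance of the
streaming-DMA pattern the driver uses for firmware-filled buffers: map the
caller's buffer, check the handle, split the 64-bit bus address into 16-bit
mailbox words, and unmap once the command completes. The skeleton with the
driver specifics stripped (a sketch, not a template from the patch):

    dma_addr_t dma = dma_map_single(&pdev->dev, buf, size,
                                    DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev, dma))
            return -ENOMEM;         /* never use an unchecked handle */

    /* mb[2]/mb[3] carry the low dword (MSW/LSW) and mb[6]/mb[7]
     * the high dword, as in the function above. */
    /* ... issue the mailbox command; firmware DMAs into buf ... */

    dma_unmap_single(&pdev->dev, dma, size, DMA_FROM_DEVICE);
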
static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
{
sp->u.iocb_cmd.u.mbx.rc = res;
@@ -6479,23 +6546,21 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
if (!vha->hw->flags.fw_started)
goto done;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
if (!sp)
goto done;
- sp->type = SRB_MB_IOCB;
- sp->name = mb_to_str(mcp->mb[0]);
-
c = &sp->u.iocb_cmd;
- c->timeout = qla2x00_async_iocb_timeout;
init_completion(&c->u.mbx.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->type = SRB_MB_IOCB;
+ sp->name = mb_to_str(mcp->mb[0]);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_mb_sp_done);
memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
- sp->done = qla2x00_async_mb_sp_done;
-
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1018,
@@ -6527,7 +6592,8 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
}
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 1c024055f8c5..16a9f22bb860 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -166,9 +166,13 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
int ret = QLA_SUCCESS;
fc_port_t *fcport;
- if (vha->hw->flags.edif_enabled)
+ if (vha->hw->flags.edif_enabled) {
+ if (DBELL_ACTIVE(vha))
+ qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
+ FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN);
/* delete sessions and flush sa_indexes */
qla2x00_wait_for_sess_deletion(vha);
+ }
if (vha->hw->flags.fw_started)
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
@@ -591,7 +595,6 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
}
kfree(req->outstanding_cmds);
kfree(req);
- req = NULL;
}
static void
@@ -617,7 +620,6 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
mutex_unlock(&ha->vport_lock);
}
kfree(rsp);
- rsp = NULL;
}
int
@@ -965,6 +967,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
+ /* ref: INIT */
sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
if (!sp)
return rval;
@@ -972,9 +975,8 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
sp->type = SRB_CTRL_VP;
sp->name = "ctrl_vp";
sp->comp = &comp;
- sp->done = qla_ctrlvp_sp_done;
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla_ctrlvp_sp_done);
sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
@@ -1008,6 +1010,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
break;
}
done:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 350b0c4346fb..f726eb8449c5 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1787,17 +1787,18 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
struct register_host_info *preg_hsi;
struct new_utsname *p_sysid = NULL;
+ /* ref: INIT */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
sp->type = SRB_FXIOCB_DCMD;
sp->name = "fxdisc";
+ qla2x00_init_async_sp(sp, FXDISC_TIMEOUT,
+ qla2x00_fxdisc_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_fxdisc_iocb_timeout;
fdisc = &sp->u.iocb_cmd;
- fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
- qla2x00_init_timer(sp, FXDISC_TIMEOUT);
-
switch (fx_type) {
case FXDISC_GET_CONFIG_INFO:
fdisc->u.fxiocb.flags =
@@ -1898,7 +1899,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
}
fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
- sp->done = qla2x00_fxdisc_sp_done;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
@@ -1974,7 +1974,8 @@ done_unmap_req:
dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 138ffdb5c92c..02fdeb0d31ec 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -43,7 +43,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
req.port_name = wwn_to_u64(fcport->port_name);
req.node_name = wwn_to_u64(fcport->node_name);
req.port_role = 0;
- req.dev_loss_tmo = 0;
+ req.dev_loss_tmo = fcport->dev_loss_tmo;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
@@ -70,6 +70,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return ret;
}
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ fcport->dev_loss_tmo);
+
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
ql_log(ql_log_info, vha, 0x212a,
"PortID:%06x Supports SLER\n", req.port_id);
@@ -167,6 +170,18 @@ out:
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
+static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
+{
+ if (sp->flags & SRB_DMA_VALID) {
+ struct srb_iocb *nvme = &sp->u.iocb_cmd;
+ struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+ dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ fd->rqstlen, DMA_TO_DEVICE);
+ sp->flags &= ~SRB_DMA_VALID;
+ }
+}
+
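
qla_nvme_ls_unmap() pairs with the SRB_DMA_VALID flag set in
qla_nvme_ls_req(): the flag records that the LS request buffer is currently
mapped, and clearing it inside the helper makes the unmap idempotent, so the
error path and the completion path can both call it without risking a double
unmap. The shape of the idiom:

    /* Idempotent unmap guarded by an ownership flag (sketch). */
    if (sp->flags & SRB_DMA_VALID) {
            dma_unmap_single(dev, dma_handle, len, DMA_TO_DEVICE);
            sp->flags &= ~SRB_DMA_VALID;    /* second call is a no-op */
    }
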
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
@@ -183,6 +198,8 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
spin_unlock_irqrestore(&priv->cmd_lock, flags);
fd = priv->fd;
+
+ qla_nvme_ls_unmap(sp, fd);
fd->done(fd, priv->comp_status);
out:
qla2x00_rel_sp(sp);
@@ -353,6 +370,8 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
+ sp->flags |= SRB_DMA_VALID;
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
@@ -360,6 +379,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
wake_up(&sp->nvme_ls_waitq);
sp->priv = NULL;
priv->sp = NULL;
+ qla_nvme_ls_unmap(sp, fd);
qla2x00_rel_sp(sp);
return rval;
}
@@ -664,12 +684,8 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
struct blk_mq_queue_map *map)
{
struct scsi_qla_host *vha = lport->private;
- int rc;
- rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
- if (rc)
- ql_log(ql_log_warn, vha, 0x21de,
- "pci map queue failed 0x%x", rc);
+ blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
@@ -707,7 +723,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
.map_queues = qla_nvme_map_queues,
- .max_hw_queues = 8,
+ .max_hw_queues = DEF_NVME_HW_QUEUES,
.max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,
.dma_boundary = 0xFFFFFFFF,
@@ -774,24 +790,51 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
ha = vha->hw;
tmpl = &qla_nvme_fc_transport;
- WARN_ON(vha->nvme_local_port);
+ if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+ "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
+ ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
+ ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+ } else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
+ ql_log(ql_log_warn, vha, 0xfffd,
+ "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
+ ql2xnvme_queues, (ha->max_qpairs - 1),
+ (ha->max_qpairs - 1));
+ ql2xnvme_queues = ha->max_qpairs - 1;
+ }
qla_nvme_fc_transport.max_hw_queues =
- min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
- (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
+ min((uint8_t)(ql2xnvme_queues),
+ (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
+
+ ql_log(ql_log_info, vha, 0xfffb,
+ "Number of NVME queues used for this port: %d\n",
+ qla_nvme_fc_transport.max_hw_queues);
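
The new module parameter is clamped before it reaches the NVMe-FC template:
the upper bound is ha->max_qpairs - 1 (one qpair is left out of the NVMe
pool), and an out-of-range low value falls back to the default rather than
the minimum. The same policy as a plain expression (sketch):

    u32 avail  = ha->max_qpairs > 1 ? ha->max_qpairs - 1 : 1;
    u32 queues = ql2xnvme_queues;

    if (queues < MIN_NVME_HW_QUEUES)
            queues = DEF_NVME_HW_QUEUES;    /* reset to default, not min */
    else if (queues > avail)
            queues = avail;                 /* cap at available qpairs */
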
pinfo.node_name = wwn_to_u64(vha->node_name);
pinfo.port_name = wwn_to_u64(vha->port_name);
pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
pinfo.port_id = vha->d_id.b24;
- ql_log(ql_log_info, vha, 0xffff,
- "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
- pinfo.node_name, pinfo.port_name, pinfo.port_id);
- qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
-
- ret = nvme_fc_register_localport(&pinfo, tmpl,
- get_device(&ha->pdev->dev), &vha->nvme_local_port);
+ mutex_lock(&ha->vport_lock);
+ /*
+ * Check again for nvme_local_port to see if any other thread raced
+ * with this one and finished registration.
+ */
+ if (!vha->nvme_local_port) {
+ ql_log(ql_log_info, vha, 0xffff,
+ "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
+ pinfo.node_name, pinfo.port_name, pinfo.port_id);
+ qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+ ret = nvme_fc_register_localport(&pinfo, tmpl,
+ get_device(&ha->pdev->dev),
+ &vha->nvme_local_port);
+ mutex_unlock(&ha->vport_lock);
+ } else {
+ mutex_unlock(&ha->vport_lock);
+ return 0;
+ }
if (ret) {
ql_log(ql_log_warn, vha, 0xffff,
"register_localport failed: ret=%x\n", ret);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index f81f219c7c7d..d299478371b2 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -13,6 +13,9 @@
#include "qla_def.h"
#include "qla_dsd.h"
+#define MIN_NVME_HW_QUEUES 1
+#define DEF_NVME_HW_QUEUES 8
+
#define NVME_ATIO_CMD_OFF 32
#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
#define Q2T_NVME_NUM_TAGS 2048
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 11aad97dfca8..6dfb70edb9a6 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -335,20 +335,20 @@ static unsigned qla82xx_crb_hub_agt[64] = {
};
/* Device states */
-static char *q_dev_state[] = {
- "Unknown",
- "Cold",
- "Initializing",
- "Ready",
- "Need Reset",
- "Need Quiescent",
- "Failed",
- "Quiescent",
+static const char *const q_dev_state[] = {
+ [QLA8XXX_DEV_UNKNOWN] = "Unknown",
+ [QLA8XXX_DEV_COLD] = "Cold/Re-init",
+ [QLA8XXX_DEV_INITIALIZING] = "Initializing",
+ [QLA8XXX_DEV_READY] = "Ready",
+ [QLA8XXX_DEV_NEED_RESET] = "Need Reset",
+ [QLA8XXX_DEV_NEED_QUIESCENT] = "Need Quiescent",
+ [QLA8XXX_DEV_FAILED] = "Failed",
+ [QLA8XXX_DEV_QUIESCENT] = "Quiescent",
};
-char *qdev_state(uint32_t dev_state)
+const char *qdev_state(uint32_t dev_state)
{
- return q_dev_state[dev_state];
+ return (dev_state < MAX_STATES) ? q_dev_state[dev_state] : "Unknown";
}
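
qdev_state() now owns the bounds check that every caller used to repeat
("dev_state < MAX_STATES ? qdev_state(dev_state) : \"Unknown\""), which is
what lets the call sites below shrink. Centralizing the guard next to the
table is the usual idiom (hypothetical names; the patch uses MAX_STATES where
ARRAY_SIZE() would also work):

    static const char *const names[] = {
            [0] = "Unknown",
            [1] = "Cold/Re-init",
            [2] = "Ready",
    };

    static const char *name_of(u32 s)
    {
            return s < ARRAY_SIZE(names) ? names[s] : "Unknown";
    }
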
/*
@@ -3061,8 +3061,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0x00b6,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA8XXX_DEV_INITIALIZING &&
@@ -3185,8 +3184,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
old_dev_state = dev_state;
ql_log(ql_log_info, vha, 0x009b,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
@@ -3207,9 +3205,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
if (loopcount < 5) {
ql_log(ql_log_info, vha, 0x009d,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) :
- "Unknown");
+ dev_state, qdev_state(dev_state));
}
switch (dev_state) {
@@ -3439,8 +3435,7 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
} else
ql_log(ql_log_info, vha, 0xb031,
"Device state is 0x%x = %s.\n",
- dev_state,
- dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
}
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 8567eaf1bddd..6dc80c8ddf79 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -540,14 +540,18 @@
#define QLA82XX_CRB_DRV_IDC_VERSION (QLA82XX_CAM_RAM(0x174))
/* Every driver should use these Device State */
-#define QLA8XXX_DEV_COLD 1
-#define QLA8XXX_DEV_INITIALIZING 2
-#define QLA8XXX_DEV_READY 3
-#define QLA8XXX_DEV_NEED_RESET 4
-#define QLA8XXX_DEV_NEED_QUIESCENT 5
-#define QLA8XXX_DEV_FAILED 6
-#define QLA8XXX_DEV_QUIESCENT 7
-#define MAX_STATES 8 /* Increment if new state added */
+enum {
+ QLA8XXX_DEV_UNKNOWN,
+ QLA8XXX_DEV_COLD,
+ QLA8XXX_DEV_INITIALIZING,
+ QLA8XXX_DEV_READY,
+ QLA8XXX_DEV_NEED_RESET,
+ QLA8XXX_DEV_NEED_QUIESCENT,
+ QLA8XXX_DEV_FAILED,
+ QLA8XXX_DEV_QUIESCENT,
+ MAX_STATES, /* Increment if new state added */
+};
+
#define QLA8XXX_BAD_VALUE 0xbad0bad0
#define QLA82XX_IDC_VERSION 1
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 5ceecc9642fc..41ff6fbdb933 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1938,8 +1938,7 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
"Device state is 0x%x = %s\n",
- dev_state, dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
@@ -1952,8 +1951,7 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
ql_log(ql_log_warn, vha, 0xb0cf,
"%s: Device Init Failed 0x%x = %s\n",
QLA2XXX_DRIVER_NAME, dev_state,
- dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ qdev_state(dev_state));
qla8044_wr_direct(vha,
QLA8044_CRB_DEV_STATE_INDEX,
QLA8XXX_DEV_FAILED);
@@ -1963,8 +1961,7 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
ql_log(ql_log_info, vha, 0xb0d0,
"Device state is 0x%x = %s\n",
- dev_state, dev_state < MAX_STATES ?
- qdev_state(dev_state) : "Unknown");
+ dev_state, qdev_state(dev_state));
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index abcd30917263..2c85f3cce726 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -15,6 +15,8 @@
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
+#include <linux/trace_events.h>
+#include <linux/trace.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -35,6 +37,8 @@ static int apidev_major;
*/
struct kmem_cache *srb_cachep;
+static struct trace_array *qla_trc_array;
+
int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
@@ -117,6 +121,11 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"ql2xextended_error_logging=1).\n"
"\t\tDo LOGICAL OR of the value to enable more than one level");
+int ql2xextended_error_logging_ktrace = 1;
+module_param(ql2xextended_error_logging_ktrace, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xextended_error_logging_ktrace,
+ "Same BIT definition as ql2xextended_error_logging, but used to control logging to kernel trace buffer (default=1).\n");
+
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
@@ -333,11 +342,23 @@ MODULE_PARM_DESC(ql2xabts_wait_nvme,
"To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
+static u32 ql2xdelay_before_pci_error_handling = 5;
+module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
+MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
+ "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n");
+
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
+u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+module_param(ql2xnvme_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(ql2xnvme_queues,
+ "Number of NVMe Queues that can be configured.\n"
+ "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n"
+ "1 - Minimum number of queues supported\n"
+ "8 - Default value");
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
@@ -728,9 +749,10 @@ void qla2x00_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- sp->free(sp);
+ /* kref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
cmd->result = res;
- CMD_SP(cmd) = NULL;
+ sp->type = 0;
scsi_done(cmd);
if (comp)
complete(comp);
@@ -819,9 +841,10 @@ void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
struct completion *comp = sp->comp;
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
cmd->result = res;
- CMD_SP(cmd) = NULL;
+ sp->type = 0;
scsi_done(cmd);
if (comp)
complete(comp);
@@ -919,12 +942,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
sp = scsi_cmd_priv(cmd);
+ /* ref: INIT */
qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
-
- CMD_SP(cmd) = (void *)sp;
sp->free = qla2x00_sp_free_dma;
sp->done = qla2x00_sp_compl;
@@ -938,7 +960,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
qc24_host_busy_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1008,11 +1031,11 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
goto qc24_target_busy;
sp = scsi_cmd_priv(cmd);
+ /* ref: INIT */
qla2xxx_init_sp(sp, vha, qpair, fcport);
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
- CMD_SP(cmd) = (void *)sp;
sp->free = qla2xxx_qpair_sp_free_dma;
sp->done = qla2xxx_qpair_sp_compl;
@@ -1026,7 +1049,8 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
return 0;
qc24_host_busy_free_sp:
- sp->free(sp);
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
qc24_target_busy:
return SCSI_MLQUEUE_TARGET_BUSY;
@@ -1057,6 +1081,7 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
unsigned long wait_iter = ABORT_WAIT_ITER;
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
struct qla_hw_data *ha = vha->hw;
+ srb_t *sp = scsi_cmd_priv(cmd);
int ret = QLA_SUCCESS;
if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
@@ -1065,10 +1090,9 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
return ret;
}
- while (CMD_SP(cmd) && wait_iter--) {
+ while (sp->type && wait_iter--)
msleep(ABORT_POLLING_PERIOD);
- }
- if (CMD_SP(cmd))
+ if (sp->type)
ret = QLA_FUNCTION_FAILED;
return ret;
@@ -1327,21 +1351,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
-int
-qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
- uint64_t l, enum nexus_wait_type type)
+static int
+__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
{
int cnt, match, status;
unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req;
+ scsi_qla_host_t *vha = qpair->vha;
+ struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
status = QLA_SUCCESS;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = vha->req;
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (cnt = 1; status == QLA_SUCCESS &&
cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
@@ -1368,12 +1391,32 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
if (!match)
continue;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
status = qla2x00_eh_wait_on_command(cmd);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ return status;
+}
+
+int
+qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
+ uint64_t l, enum nexus_wait_type type)
+{
+ struct qla_qpair *qpair;
+ struct qla_hw_data *ha = vha->hw;
+ int i, status = QLA_SUCCESS;
+ status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
+ type);
+ for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
+ qpair = ha->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
+ type);
+ }
return status;
}
@@ -1410,7 +1453,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
@@ -1478,7 +1521,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
return err;
if (fcport->deleted)
- return SUCCESS;
+ return FAILED;
ql_log(ql_log_info, vha, 0x8009,
"TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
@@ -2805,6 +2848,27 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
spin_unlock_irqrestore(&vha->work_lock, flags);
}
+static void
+qla_trace_init(void)
+{
+ qla_trc_array = trace_array_get_by_name("qla2xxx");
+ if (!qla_trc_array) {
+ ql_log(ql_log_fatal, NULL, 0x0001,
+ "Unable to create qla2xxx trace instance, instance logging will be disabled.\n");
+ return;
+ }
+
+ QLA_TRACE_ENABLE(qla_trc_array);
+}
+
+static void
+qla_trace_uninit(void)
+{
+ if (!qla_trc_array)
+ return;
+ trace_array_put(qla_trc_array);
+}
+
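qla_trace_init()/qla_trace_uninit() use the kernel's trace-instance API: trace_array_get_by_name() creates (or attaches to) a named instance under /sys/kernel/tracing/instances/ and takes a reference that trace_array_put() drops. A self-contained sketch of the same pattern, assuming a hypothetical module and the one-argument trace_array_get_by_name() as used in this patch:

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/trace.h>

    static struct trace_array *my_trc;

    static int __init my_trc_init(void)
    {
        /* creates (or attaches to) /sys/kernel/tracing/instances/mydrv */
        my_trc = trace_array_get_by_name("mydrv");
        if (!my_trc)
            return -ENOMEM;
        trace_array_init_printk(my_trc);    /* allocate printk buffers */
        trace_array_printk(my_trc, _THIS_IP_, "instance up\n");
        return 0;
    }

    static void __exit my_trc_exit(void)
    {
        trace_array_put(my_trc);            /* drop our reference */
    }

    module_init(my_trc_init);
    module_exit(my_trc_exit);
    MODULE_LICENSE("GPL");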
/*
* PCI driver interface
*/
@@ -3486,7 +3550,7 @@ skip_dpc:
qla_dual_mode_enabled(base_vha))
scsi_scan_host(host);
else
- ql_dbg(ql_dbg_init, base_vha, 0x0122,
+ ql_log(ql_log_info, base_vha, 0x0122,
"skipping scsi_scan_host() for non-initiator port\n");
qla2x00_alloc_sysfs_attr(base_vha);
@@ -3748,8 +3812,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
if (ha->mqiobase)
iounmap(ha->mqiobase);
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
- ha->msixbase)
+ if (ha->msixbase)
iounmap(ha->msixbase);
}
}
@@ -3891,6 +3954,8 @@ qla24xx_free_purex_list(struct purex_list *list)
spin_lock_irqsave(&list->lock, flags);
list_for_each_entry_safe(item, next, &list->head, list) {
list_del(&item->list);
+ if (item == &item->vha->default_item)
+ continue;
kfree(item);
}
spin_unlock_irqrestore(&list->lock, flags);
@@ -3922,7 +3987,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
/* Flush the work queue and remove it */
if (ha->wq) {
- flush_workqueue(ha->wq);
destroy_workqueue(ha->wq);
ha->wq = NULL;
}
@@ -4949,7 +5013,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
INIT_LIST_HEAD(&vha->qla_cmd_list);
- INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
@@ -5463,7 +5526,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
e->u.fcport.fcport, false);
break;
case QLA_EVT_SA_REPLACE:
- qla24xx_issue_sa_replace_iocb(vha, e);
+ rc = qla24xx_issue_sa_replace_iocb(vha, e);
break;
}
@@ -5527,6 +5590,11 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
ea.fcport = fcport;
qla24xx_handle_relogin_event(vha, &ea);
} else if (vha->hw->current_topology ==
+ ISP_CFG_NL &&
+ IS_QLA2XXX_MIDTYPE(vha->hw)) {
+ (void)qla24xx_fcport_handle_login(vha,
+ fcport);
+ } else if (vha->hw->current_topology ==
ISP_CFG_NL) {
fcport->login_retry--;
status =
@@ -7199,7 +7267,7 @@ skip:
return do_heartbeat;
}
-static void qla_heart_beat(struct scsi_qla_host *vha)
+static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
{
struct qla_hw_data *ha = vha->hw;
@@ -7209,8 +7277,57 @@ static void qla_heart_beat(struct scsi_qla_host *vha)
if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
return;
- if (qla_do_heartbeat(vha))
+ /*
+ * The dpc thread cannot run if the heartbeat is running at the same
+ * time. We also do not want to starve the heartbeat task, so run it
+ * at least once every 5 seconds.
+ */
+ if (dpc_started &&
+ time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
+ return;
+
+ if (qla_do_heartbeat(vha)) {
+ ha->last_heartbeat_run_jiffies = jiffies;
queue_work(ha->wq, &ha->heartbeat_work);
+ }
+}
+
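The throttle above relies on the wrap-safe jiffies comparison helpers: the heartbeat is skipped while the competing dpc thread has run within the last 5 seconds. A standalone sketch of the same idiom, with hypothetical names:

    #include <linux/jiffies.h>

    static unsigned long last_run_jiffies;

    static bool should_run_now(bool other_task_started)
    {
        /* skip if the other task ran and 5 s have not yet elapsed;
         * time_before() is safe across jiffies wraparound */
        if (other_task_started &&
            time_before(jiffies, last_run_jiffies + 5 * HZ))
            return false;

        last_run_jiffies = jiffies;
        return true;
    }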
+static void qla_wind_down_chip(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->flags.eeh_busy)
+ return;
+ if (ha->pci_error_state)
+ /* system is trying to recover */
+ return;
+
+ /*
+ * The system is not handling the PCIe error. At this point, this is
+ * a best-effort attempt to wind down the adapter.
+ */
+ if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
+ !ha->flags.eeh_flush) {
+ ql_log(ql_log_info, vha, 0x9009,
+ "PCI Error detected, attempting to reset hardware.\n");
+
+ ha->isp_ops->reset_chip(vha);
+ ha->isp_ops->disable_intrs(ha);
+
+ ha->flags.eeh_flush = EEH_FLUSH_RDY;
+ ha->eeh_jif = jiffies;
+
+ } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
+ time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) {
+ pci_clear_master(ha->pdev);
+
+ /* flush all commands */
+ qla2x00_abort_isp_cleanup(vha);
+ ha->flags.eeh_flush = EEH_FLUSH_DONE;
+
+ ql_log(ql_log_info, vha, 0x900a,
+ "PCI Error handling complete, all IOs aborted.\n");
+ }
}
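qla_wind_down_chip() is a timer-driven two-stage state machine: after a grace period it quiesces the chip (EEH_FLUSH_RDY), and 5 seconds later it aborts all outstanding I/O (EEH_FLUSH_DONE). A reduced sketch of that shape, with hypothetical names and the hardware calls elided:

    #include <linux/jiffies.h>

    enum { FLUSH_NONE, FLUSH_RDY, FLUSH_DONE };

    struct my_hw {
        unsigned long err_jiffies;  /* when the error was first seen */
        unsigned int delay_secs;    /* grace period before stage 1 */
        int flush_state;            /* FLUSH_NONE -> FLUSH_RDY -> FLUSH_DONE */
    };

    static void wind_down(struct my_hw *hw)
    {
        if (hw->flush_state == FLUSH_NONE &&
            time_after_eq(jiffies, hw->err_jiffies + hw->delay_secs * HZ)) {
            /* stage 1: quiesce the chip, then restart the clock */
            hw->flush_state = FLUSH_RDY;
            hw->err_jiffies = jiffies;
        } else if (hw->flush_state == FLUSH_RDY &&
                   time_after_eq(jiffies, hw->err_jiffies + 5 * HZ)) {
            /* stage 2: abort outstanding I/O */
            hw->flush_state = FLUSH_DONE;
        }
    }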
/**************************************************************************
@@ -7236,6 +7353,8 @@ qla2x00_timer(struct timer_list *t)
fc_port_t *fcport = NULL;
if (ha->flags.eeh_busy) {
+ qla_wind_down_chip(vha);
+
ql_dbg(ql_dbg_timer, vha, 0x6000,
"EEH = %d, restarting timer.\n",
ha->flags.eeh_busy);
@@ -7401,6 +7520,8 @@ qla2x00_timer(struct timer_list *t)
start_dpc++;
}
+ /* borrowing w to signify that the dpc will run */
+ w = 0;
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
@@ -7433,9 +7554,10 @@ qla2x00_timer(struct timer_list *t)
test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
+ w = 1;
}
- qla_heart_beat(vha);
+ qla_heart_beat(vha, w);
qla2x00_restart_timer(vha, WATCH_INTERVAL);
}
@@ -7633,7 +7755,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
switch (state) {
case pci_channel_io_normal:
- ha->flags.eeh_busy = 0;
+ qla_pci_set_eeh_busy(vha);
if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -7674,9 +7796,16 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
"mmio enabled\n");
ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
+
if (IS_QLA82XX(ha))
return PCI_ERS_RESULT_RECOVERED;
+ if (qla2x00_isp_reg_stat(ha)) {
+ ql_log(ql_log_info, base_vha, 0x803f,
+ "During mmio enabled, PCI/Register disconnect still detected.\n");
+ goto out;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if (IS_QLA2100(ha) || IS_QLA2200(ha)){
stat = rd_reg_word(&reg->hccr);
@@ -7698,6 +7827,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
"RISC paused -- mmio_enabled, Dumping firmware.\n");
qla2xxx_dump_fw(base_vha);
}
+out:
/* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
ql_dbg(ql_dbg_aer, base_vha, 0x600d,
"mmio enabled returning.\n");
@@ -7805,6 +7935,9 @@ void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
spin_lock_irqsave(&base_vha->work_lock, flags);
if (!ha->flags.eeh_busy) {
+ ha->eeh_jif = jiffies;
+ ha->flags.eeh_flush = 0;
+
ha->flags.eeh_busy = 1;
do_cleanup = true;
}
@@ -7881,17 +8014,15 @@ qla_pci_reset_done(struct pci_dev *pdev)
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
-static int qla2xxx_map_queues(struct Scsi_Host *shost)
+static void qla2xxx_map_queues(struct Scsi_Host *shost)
{
- int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
- rc = blk_mq_map_queues(qmap);
+ blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
- return rc;
+ blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
}
struct scsi_host_template qla2xxx_driver_template = {
@@ -8078,6 +8209,8 @@ qla2x00_module_init(void)
BUILD_BUG_ON(sizeof(sw_info_t) != 32);
BUILD_BUG_ON(sizeof(target_id_t) != 2);
+ qla_trace_init();
+
/* Allocate cache for SRBs. */
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
SLAB_HWCACHE_ALIGN, NULL);
@@ -8156,6 +8289,8 @@ qlt_exit:
destroy_cache:
kmem_cache_destroy(srb_cachep);
+
+ qla_trace_uninit();
return ret;
}
@@ -8174,6 +8309,7 @@ qla2x00_module_exit(void)
fc_release_transport(qla2xxx_transport_template);
qlt_exit();
kmem_cache_destroy(srb_cachep);
+ qla_trace_uninit();
}
module_init(qla2x00_module_init);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index a0aeba69513d..c092a6b1ced4 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -844,7 +844,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_nvram = start;
break;
case FLT_REG_IMG_PRI_27XX:
- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
ha->flt_region_img_status_pri = start;
break;
case FLT_REG_IMG_SEC_27XX:
@@ -1356,7 +1356,7 @@ next:
flash_data_addr(ha, faddr), le32_to_cpu(*dwptr));
if (ret) {
ql_dbg(ql_dbg_user, vha, 0x7006,
- "Failed slopw write %x (%x)\n", faddr, *dwptr);
+ "Failed slow write %x (%x)\n", faddr, *dwptr);
break;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8993d438e0b7..bb754a950802 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -48,13 +48,6 @@ MODULE_PARM_DESC(qlini_mode,
"when ready "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql_dm_tgt_ex_pct = 0;
-module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
- "For Dual Mode (qlini_mode=dual), this parameter determines "
- "the percentage of exchanges/cmds FW will allocate resources "
- "for Target mode.");
-
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
@@ -620,7 +613,7 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
@@ -656,12 +649,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->type = type;
sp->name = "nack";
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
+ qla2x00_async_nack_sp_done);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
- sp->done = qla2x00_async_nack_sp_done;
ql_dbg(ql_dbg_disc, vha, 0x20f4,
"Async-%s %8phC hndl %x %s\n",
@@ -674,7 +665,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- sp->free(sp);
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
@@ -990,22 +981,6 @@ void qlt_free_session_done(struct work_struct *work)
sess->send_els_logo);
if (!IS_SW_RESV_ADDR(sess->d_id)) {
- if (ha->flags.edif_enabled &&
- (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
- sess->edif.authok = 0;
- if (!ha->flags.host_shutting_down) {
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
- __func__, sess->port_name);
- qla2x00_release_all_sadb(vha, sess);
- } else {
- ql_dbg(ql_dbg_edif, vha, 0x911e,
- "%s bypassing release_all_sadb\n",
- __func__);
- }
- qla_edif_clear_appdata(vha, sess);
- qla_edif_sess_down(vha, sess);
- }
qla2x00_mark_device_lost(vha, sess, 0);
if (sess->send_els_logo) {
@@ -1051,6 +1026,25 @@ void qlt_free_session_done(struct work_struct *work)
sess->nvme_flag |= NVME_FLAG_DELETING;
qla_nvme_unregister_remote_port(sess);
}
+
+ if (ha->flags.edif_enabled &&
+ (!own || (own &&
+ own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ sess->edif.authok = 0;
+ if (!ha->flags.host_shutting_down) {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
+ __func__, sess->port_name);
+ qla2x00_release_all_sadb(vha, sess);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x911e,
+ "%s bypassing release_all_sadb\n",
+ __func__);
+ }
+
+ qla_edif_clear_appdata(vha, sess);
+ qla_edif_sess_down(vha, sess);
+ }
}
/*
@@ -1563,11 +1557,11 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
"Waiting for sess works (tgt %p)", tgt);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
- while (!list_empty(&tgt->sess_works_list)) {
+ do {
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
- flush_scheduled_work();
+ flush_work(&tgt->sess_work);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
- }
+ } while (!list_empty(&tgt->sess_works_list));
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
@@ -2026,17 +2020,6 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
key = sid_to_key(s_id);
spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- uint32_t op_key;
- u64 op_lun;
-
- op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
- op_lun = scsilun_to_int(
- (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
- if (op_key == key && op_lun == lun)
- op->aborted = true;
- }
-
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
uint32_t op_key;
u64 op_lun;
@@ -2168,8 +2151,10 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
le32_to_cpu(abts->exchange_addr_to_abort));
- if (!abort_cmd)
+ if (!abort_cmd) {
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
return -EIO;
+ }
mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
if (abort_cmd->qpair) {
@@ -3320,6 +3305,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
+ res = 0;
goto out_unmap_unlock;
}
@@ -3838,6 +3824,9 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if (cmd->aborted) {
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
/*
* It's normal to see 2 calls in this path:
@@ -3875,8 +3864,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
BUG_ON(cmd->sg_mapped);
cmd->jiffies_at_free = get_jiffies_64();
- if (unlikely(cmd->free_sg))
- kfree(cmd->sg);
if (!sess || !sess->se_sess) {
WARN_ON(1);
@@ -4727,15 +4714,6 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
((u32)s_id->b.al_pa));
spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
-
- if (op_key == key) {
- op->aborted = true;
- count++;
- }
- }
-
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
@@ -6358,69 +6336,6 @@ out_term:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
-static void qlt_tmr_work(struct qla_tgt *tgt,
- struct qla_tgt_sess_work_param *prm)
-{
- struct atio_from_isp *a = &prm->tm_iocb2;
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- unsigned long flags;
- be_id_t s_id;
- int rc;
- u64 unpacked_lun;
- int fn;
- void *iocb;
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
-
- if (tgt->tgt_stop)
- goto out_term2;
-
- s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
- if (!sess) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has got an extra creation ref */
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- if (!sess)
- goto out_term2;
- } else {
- if (sess->deleted) {
- goto out_term2;
- }
-
- if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
- "%s: kref_get fail %8phC\n",
- __func__, sess->port_name);
- goto out_term2;
- }
- }
-
- iocb = a;
- fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
- unpacked_lun =
- scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
-
- rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- ha->tgt.tgt_ops->put_sess(sess);
-
- if (rc != 0)
- goto out_term;
- return;
-
-out_term2:
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-out_term:
- qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
-}
-
static void qlt_sess_work_fn(struct work_struct *work)
{
struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
@@ -6447,9 +6362,6 @@ static void qlt_sess_work_fn(struct work_struct *work)
case QLA_TGT_SESS_WORK_ABORT:
qlt_abort_work(tgt, prm);
break;
- case QLA_TGT_SESS_WORK_TM:
- qlt_tmr_work(tgt, prm);
- break;
default:
BUG_ON(1);
break;
@@ -6536,7 +6448,6 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->ha = ha;
tgt->vha = base_vha;
init_waitqueue_head(&tgt->waitQ);
- INIT_LIST_HEAD(&tgt->del_sess_list);
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
@@ -6959,14 +6870,8 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
if (ha->flags.msix_enabled) {
if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- if (IS_QLA2071(ha)) {
- /* 4 ports Baker: Enable Interrupt Handshake */
- icb->msix_atio = 0;
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- } else {
- icb->msix_atio = cpu_to_le16(msix->entry);
- icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
- }
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
@@ -7221,8 +7126,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
- if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) {
+ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 156b950ca7e7..7df86578214f 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -813,9 +813,6 @@ struct qla_tgt {
/* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
int sess_count;
- /* Protected by hardware_lock */
- struct list_head del_sess_list;
-
spinlock_t sess_work_lock;
struct list_head sess_works_list;
struct work_struct sess_work;
@@ -883,7 +880,6 @@ struct qla_tgt_cmd {
/* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
- unsigned int free_sg:1;
unsigned int write_data_transferred:1;
unsigned int q_full:1;
unsigned int term_exchg:1;
@@ -946,7 +942,6 @@ struct qla_tgt_sess_work_param {
struct list_head sess_works_list_entry;
#define QLA_TGT_SESS_WORK_ABORT 1
-#define QLA_TGT_SESS_WORK_TM 2
int type;
union {
@@ -1080,8 +1075,6 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_81xx *);
extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_81xx *);
-extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
- struct sts_entry_24xx *);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
struct vp_config_entry_24xx *);
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 26c13a953b97..b0a74b036cf4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -435,8 +435,13 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20a,
"%s: reset risc [%lx]\n", __func__, *len);
- if (buf)
- WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS);
+ if (buf) {
+ if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0x5001,
+ "%s: unable to soft reset\n", __func__);
+ return INVALID_ENTRY;
+ }
+ }
return qla27xx_next_entry(ent);
}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 27e440f8a702..03f3e2cd62b5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.200-k"
+#define QLA2XXX_VERSION "10.02.07.900-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 200
+#define QLA_DRIVER_BETA_VER 900
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 69a590546bf9..5f82c8afd5e0 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -216,11 +216,21 @@
#define IDC_COMP_TOV 5
#define LINK_UP_COMP_TOV 30
-#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
+/*
+ * Note: the data structure below does not have a struct iscsi_cmd member since
+ * the qla4xxx driver does not use libiscsi for SCSI I/O.
+ */
+struct qla4xxx_cmd_priv {
+ struct srb *srb;
+};
+
+static inline struct qla4xxx_cmd_priv *qla4xxx_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
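qla4xxx_cmd_priv() follows the midlayer-managed per-command data pattern: the host template's .cmd_size (added below in ql4_os.c) tells the SCSI core how many extra bytes to allocate alongside each scsi_cmnd, and scsi_cmd_priv() returns that area, replacing the old CMD_SP()/SCp.ptr stashing. A minimal sketch with hypothetical names:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    struct my_cmd_priv {
        void *driver_data;          /* e.g. a pointer to the driver's srb */
    };

    static inline struct my_cmd_priv *my_cmd_priv(struct scsi_cmnd *cmd)
    {
        return scsi_cmd_priv(cmd);  /* area placed right after the scsi_cmnd */
    }

    static struct scsi_host_template my_sht = {
        .name     = "mydrv",
        .cmd_size = sizeof(struct my_cmd_priv),
        /* .queuecommand, .eh_* handlers, ... */
    };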
/*
- * SCSI Request Block structure (srb) that is placed
- * on cmd->SCp location of every I/O [We have 22 bytes available]
+ * SCSI Request Block structure (srb) that is associated with each scsi_cmnd.
*/
struct srb {
struct list_head list; /* (8) */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 4e1764df0a73..860ec61b51b9 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1028,7 +1028,7 @@ struct crash_record {
uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
uint8_t in_RISC_reg_dump[256]; /*180 -27F */
- uint8_t in_out_RISC_stack_dump[0]; /*280 - ??? */
+ uint8_t in_out_RISC_stack_dump[]; /*280 - ??? */
};
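The [0] to [] change converts a GNU zero-length array into a C99 flexible array member, which compilers and FORTIFY-style bounds checking understand; the kernel has been phasing out the former tree-wide. A standalone (non-kernel) illustration of the allocation pattern:

    #include <stdlib.h>
    #include <string.h>

    struct record {
        unsigned int len;
        unsigned char data[];       /* flexible array member: must be last */
    };

    static struct record *record_alloc(const void *src, unsigned int len)
    {
        struct record *r = malloc(sizeof(*r) + len);    /* header + payload */

        if (!r)
            return NULL;
        r->len = len;
        memcpy(r->data, src, len);
        return r;
    }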
struct conn_event_log_entry {
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 8987acc24dac..9e849f6b0d0f 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -226,6 +226,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.name = DRIVER_NAME,
.proc_name = DRIVER_NAME,
.queuecommand = qla4xxx_queuecommand,
+ .cmd_size = sizeof(struct qla4xxx_cmd_priv),
.eh_abort_handler = qla4xxx_eh_abort,
.eh_device_reset_handler = qla4xxx_eh_device_reset,
@@ -670,7 +671,6 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
goto exit_chap_list;
}
- memset(ha->chap_list, 0, chap_size);
memcpy(ha->chap_list, chap_flash_data, chap_size);
exit_chap_list:
@@ -3639,7 +3639,6 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
struct dev_db_entry *fw_ddb_entry)
{
uint16_t options;
- int rc = 0;
options = le16_to_cpu(fw_ddb_entry->options);
SET_BITVAL(conn->is_fw_assigned_ipv6, options, BIT_11);
@@ -3738,7 +3737,7 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
COPY_ISID(fw_ddb_entry->isid, sess->isid);
- return rc;
+ return 0;
}
static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
@@ -4054,7 +4053,7 @@ static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
srb->ddb = ddb_entry;
srb->cmd = cmd;
srb->flags = 0;
- CMD_SP(cmd) = (void *)srb;
+ qla4xxx_cmd_priv(cmd)->srb = srb;
return srb;
}
@@ -4067,7 +4066,7 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
scsi_dma_unmap(cmd);
srb->flags &= ~SRB_DMA_VALID;
}
- CMD_SP(cmd) = NULL;
+ qla4xxx_cmd_priv(cmd)->srb = NULL;
}
void qla4xxx_srb_compl(struct kref *ref)
@@ -4640,7 +4639,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
* the scsi/block layer is going to prevent
* the tag from being released.
*/
- if (cmd != NULL && CMD_SP(cmd))
+ if (cmd != NULL && qla4xxx_cmd_priv(cmd)->srb)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5096,7 +5095,7 @@ int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
" start scan\n", ha->host_no, __func__,
ddb_entry->fw_ddb_index);
- scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ queue_work(ddb_entry->sess->workq, &ddb_entry->sess->scan_work);
}
return QLA_SUCCESS;
}
@@ -5734,7 +5733,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
@@ -5843,7 +5842,7 @@ qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
(char *)&boot_conn->chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
@@ -9079,7 +9078,7 @@ struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
if (!cmd)
return srb;
- srb = (struct srb *)CMD_SP(cmd);
+ srb = qla4xxx_cmd_priv(cmd)->srb;
if (!srb)
return srb;
@@ -9121,7 +9120,7 @@ static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha,
do {
/* Checking to see if its returned to OS */
- rp = (struct srb *) CMD_SP(cmd);
+ rp = qla4xxx_cmd_priv(cmd)->srb;
if (rp == NULL) {
done++;
break;
@@ -9215,7 +9214,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- srb = (struct srb *) CMD_SP(cmd);
+ srb = qla4xxx_cmd_priv(cmd)->srb;
if (!srb) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 8f709002f746..8f05e3707d69 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -31,8 +31,12 @@
#include <asm/irq.h>
#include <asm/dma.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "qlogicfas408.h"
/* Set the following to 2 to use normal interrupt (active high/totempole-
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 30a88849a626..3e065d5fc80c 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -55,8 +55,12 @@
#include <asm/irq.h>
#include <asm/dma.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "qlogicfas408.h"
/*----------------------------------------------------------------*/
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 57f2f4135a06..8c961ff03fcd 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -909,7 +909,8 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd,
sg_count = dma_map_sg(&qpti->op->dev, sg,
scsi_sg_count(Cmnd),
Cmnd->sc_data_direction);
-
+ if (!sg_count)
+ return -1;
ds = cmd->dataseg;
cmd->segment_cnt = sg_count;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index f6af1562cba4..c59eac7a32f2 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -55,7 +55,6 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
-#include <linux/async.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -201,11 +200,11 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
/*
- * 1024 is big enough for saturating the fast scsi LUN now
+ * 4096 is big enough for saturating fast SCSI LUNs.
*/
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
- return max_t(int, sdev->host->can_queue, 1024);
+ return min_t(int, sdev->host->can_queue, 4096);
}
/**
@@ -322,6 +321,31 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
return get_unaligned_be16(&buffer[2]) + 4;
}
+static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+{
+ unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ int result;
+
+ /*
+ * Fetch the VPD page header to find out how big the page
+ * is. This is done to prevent problems on legacy devices
+ * which cannot handle allocation lengths as large as
+ * potentially requested by the caller.
+ */
+ result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+ if (result < 0)
+ return 0;
+
+ if (result < SCSI_VPD_HEADER_SIZE) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: short VPD page 0x%02x length: %d bytes\n",
+ __func__, page, result);
+ return 0;
+ }
+
+ return result;
+}
+
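scsi_get_vpd_size() exploits the SPC VPD page layout: bytes 2..3 of the 4-byte header carry the big-endian PAGE LENGTH, which excludes the header itself, so the full page size is that value plus 4 (exactly what scsi_vpd_inquiry() returns). A standalone sketch of the computation, mirroring the driver's SCSI_VPD_HEADER_SIZE constant:

    #include <stdint.h>

    #define SCSI_VPD_HEADER_SIZE 4

    static unsigned int vpd_page_size(const uint8_t *hdr)
    {
        /* PAGE LENGTH (be16 at bytes 2..3) + the 4-byte header */
        return (((unsigned int)hdr[2] << 8) | hdr[3]) + SCSI_VPD_HEADER_SIZE;
    }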
/**
* scsi_get_vpd_page - Get Vital Product Data from a SCSI device
* @sdev: The device to ask
@@ -331,47 +355,38 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
*
* SCSI devices may optionally supply Vital Product Data. Each 'page'
* of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
- * If the device supports this VPD page, this routine returns a pointer
- * to a buffer containing the data from that page. The caller is
- * responsible for calling kfree() on this pointer when it is no longer
- * needed. If we cannot retrieve the VPD page this routine returns %NULL.
+ * If the device supports this VPD page, this routine fills @buf
+ * with the data from that page and returns 0. If the VPD page is not
+ * supported or its content cannot be retrieved, -EINVAL is returned.
*/
int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
int buf_len)
{
- int i, result;
-
- if (sdev->skip_vpd_pages)
- goto fail;
-
- /* Ask for all the pages supported by this device */
- result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
- if (result < 4)
- goto fail;
+ int result, vpd_len;
- /* If the user actually wanted this page, we can skip the rest */
- if (page == 0)
- return 0;
+ if (!scsi_device_supports_vpd(sdev))
+ return -EINVAL;
- for (i = 4; i < min(result, buf_len); i++)
- if (buf[i] == page)
- goto found;
+ vpd_len = scsi_get_vpd_size(sdev, page);
+ if (vpd_len <= 0)
+ return -EINVAL;
- if (i < result && i >= buf_len)
- /* ran off the end of the buffer, give us benefit of doubt */
- goto found;
- /* The device claims it doesn't support the requested page */
- goto fail;
+ vpd_len = min(vpd_len, buf_len);
- found:
- result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
+ /*
+ * Fetch the actual page. Since the appropriate size was reported
+ * by the device it is now safe to ask for something bigger.
+ */
+ memset(buf, 0, buf_len);
+ result = scsi_vpd_inquiry(sdev, buf, page, vpd_len);
if (result < 0)
- goto fail;
+ return -EINVAL;
+ else if (result > vpd_len)
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: VPD page 0x%02x result %d > %d bytes\n",
+ __func__, page, result, vpd_len);
return 0;
-
- fail:
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
@@ -385,9 +400,17 @@ EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
static struct scsi_vpd *scsi_get_vpd_buf(struct scsi_device *sdev, u8 page)
{
struct scsi_vpd *vpd_buf;
- int vpd_len = SCSI_VPD_PG_LEN, result;
+ int vpd_len, result;
+
+ vpd_len = scsi_get_vpd_size(sdev, page);
+ if (vpd_len <= 0)
+ return NULL;
retry_pg:
+ /*
+ * Fetch the actual page. Since the appropriate size was reported
+ * by the device it is now safe to ask for something bigger.
+ */
vpd_buf = kmalloc(sizeof(*vpd_buf) + vpd_len, GFP_KERNEL);
if (!vpd_buf)
return NULL;
@@ -398,6 +421,9 @@ retry_pg:
return NULL;
}
if (result > vpd_len) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: VPD page 0x%02x result %d > %d bytes\n",
+ __func__, page, result, vpd_len);
vpd_len = result;
kfree(vpd_buf);
goto retry_pg;
@@ -457,6 +483,12 @@ void scsi_attach_vpd(struct scsi_device *sdev)
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
if (vpd_buf->data[i] == 0x89)
scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
+ if (vpd_buf->data[i] == 0xb0)
+ scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
+ if (vpd_buf->data[i] == 0xb1)
+ scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
+ if (vpd_buf->data[i] == 0xb2)
+ scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
}
kfree(vpd_buf);
}
@@ -477,21 +509,30 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
- int result;
+ int result, request_len;
if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
return -EINVAL;
+ /* RSOC header + size of command we are asking about */
+ request_len = 4 + COMMAND_SIZE(opcode);
+ if (request_len > len) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: len %u bytes, opcode 0x%02x needs %u\n",
+ __func__, len, opcode, request_len);
+ return -EINVAL;
+ }
+
memset(cmd, 0, 16);
cmd[0] = MAINTENANCE_IN;
cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
cmd[2] = 1; /* One command format */
cmd[3] = opcode;
- put_unaligned_be32(len, &cmd[6]);
+ put_unaligned_be32(request_len, &cmd[6]);
memset(buffer, 0, len);
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
- &sshdr, 30 * HZ, 3, NULL);
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
+ request_len, &sshdr, 30 * HZ, 3, NULL);
if (result < 0)
return result;
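The request length computed above is the 4-byte "one command format" RSOC reply header plus the CDB size of the opcode being queried; COMMAND_SIZE() derives the latter from the opcode's group bits (the top three bits). A standalone sketch using the same group-to-length table as the kernel's scsi_command_size_tbl:

    #include <stdint.h>

    /* CDB group (top 3 bits of the opcode) -> CDB length, as in SPC */
    static const uint8_t cdb_group_len[8] = { 6, 10, 10, 12, 16, 12, 10, 10 };

    static unsigned int rsoc_request_len(uint8_t opcode)
    {
        return 4 + cdb_group_len[opcode >> 5];  /* header + one CDB entry */
    }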
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
deleted file mode 100644
index 4fd75a3aff66..000000000000
--- a/drivers/scsi/scsi.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * scsi.h Copyright (C) 1992 Drew Eckhardt
- * Copyright (C) 1993, 1994, 1995, 1998, 1999 Eric Youngdale
- * generic SCSI package header file by
- * Initial versions: Drew Eckhardt
- * Subsequent revisions: Eric Youngdale
- *
- * <drew@colorado.edu>
- *
- * Modified by Eric Youngdale eric@andante.org to
- * add scatter-gather, multiple outstanding request, and other
- * enhancements.
- */
-/*
- * NOTE: this file only contains compatibility glue for old drivers. All
- * these wrappers will be removed sooner or later. For new code please use
- * the interfaces declared in the headers in include/scsi/
- */
-
-#ifndef _SCSI_H
-#define _SCSI_H
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi.h>
-
-/*
- * Some defs, in case these are not defined elsewhere.
- */
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-struct Scsi_Host;
-struct scsi_cmnd;
-struct scsi_device;
-struct scsi_target;
-struct scatterlist;
-
-#endif /* _SCSI_H */
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index 081b84bb7985..96ee35256a16 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -12,7 +12,7 @@
static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
fmode_t mode, unsigned int timeout)
{
- struct scsi_request *sreq;
+ struct scsi_cmnd *scmd;
struct request *rq;
struct bio *bio;
int ret;
@@ -31,21 +31,19 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
return PTR_ERR(rq);
rq->timeout = timeout;
- ret = -ENOMEM;
- sreq = scsi_req(rq);
- sreq->cmd_len = hdr->request_len;
- if (sreq->cmd_len > BLK_MAX_CDB) {
- sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
- if (!sreq->cmd)
- goto out_put_request;
+ scmd = blk_mq_rq_to_pdu(rq);
+ scmd->cmd_len = hdr->request_len;
+ if (scmd->cmd_len > sizeof(scmd->cmnd)) {
+ ret = -EINVAL;
+ goto out_put_request;
}
ret = -EFAULT;
- if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
- goto out_free_cmd;
+ if (copy_from_user(scmd->cmnd, uptr64(hdr->request), scmd->cmd_len))
+ goto out_put_request;
ret = -EPERM;
- if (!scsi_cmd_allowed(sreq->cmd, mode))
- goto out_free_cmd;
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
+ goto out_put_request;
ret = 0;
if (hdr->dout_xfer_len) {
@@ -57,43 +55,42 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
}
if (ret)
- goto out_free_cmd;
+ goto out_put_request;
bio = rq->bio;
- blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
+ blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
/*
* fill in all the output members
*/
- hdr->device_status = sreq->result & 0xff;
- hdr->transport_status = host_byte(sreq->result);
+ hdr->device_status = scmd->result & 0xff;
+ hdr->transport_status = host_byte(scmd->result);
hdr->driver_status = 0;
- if (scsi_status_is_check_condition(sreq->result))
+ if (scsi_status_is_check_condition(scmd->result))
hdr->driver_status = DRIVER_SENSE;
hdr->info = 0;
if (hdr->device_status || hdr->transport_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
hdr->response_len = 0;
- if (sreq->sense_len && hdr->response) {
+ if (scmd->sense_len && hdr->response) {
int len = min_t(unsigned int, hdr->max_response_len,
- sreq->sense_len);
+ scmd->sense_len);
- if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
+ if (copy_to_user(uptr64(hdr->response), scmd->sense_buffer,
+ len))
ret = -EFAULT;
else
hdr->response_len = len;
}
if (rq_data_dir(rq) == READ)
- hdr->din_resid = sreq->resid_len;
+ hdr->din_resid = scmd->resid_len;
else
- hdr->dout_resid = sreq->resid_len;
+ hdr->dout_resid = scmd->resid_len;
blk_rq_unmap_user(bio);
-out_free_cmd:
- scsi_req_free_cmd(scsi_req(rq));
out_put_request:
blk_mq_free_request(rq);
return ret;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 2104973a35cd..629853662b82 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -7,7 +7,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2020 Douglas Gilbert
+ * Copyright (C) 2001 - 2021 Douglas Gilbert
*
* For documentation see http://sg.danny.cz/sg/scsi_debug.html
*/
@@ -16,14 +16,13 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
-
+#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
-#include <linux/genhd.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
@@ -60,8 +59,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "0190" /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20200710";
+#define SDEBUG_VERSION "0191" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20210520";
#define MY_NAME "scsi_debug"
@@ -83,6 +82,7 @@ static const char *sdebug_version_date = "20200710";
#define INSUFF_RES_ASC 0x55
#define INSUFF_RES_ASCQ 0x3
#define POWER_ON_RESET_ASCQ 0x0
+#define POWER_ON_OCCURRED_ASCQ 0x1
#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9
@@ -98,6 +98,7 @@ static const char *sdebug_version_date = "20200710";
#define WRITE_BOUNDARY_ASCQ 0x5
#define READ_INVDATA_ASCQ 0x6
#define READ_BOUNDARY_ASCQ 0x7
+#define ATTEMPT_ACCESS_GAP 0x9
#define INSUFF_ZONE_ASCQ 0xe
/* Additional Sense Code Qualifier (ASCQ) */
@@ -173,7 +174,7 @@ static const char *sdebug_version_date = "20200710";
#define SDEBUG_OPT_MAC_TIMEOUT 128
#define SDEBUG_OPT_SHORT_TRANSFER 0x100
#define SDEBUG_OPT_Q_NOISE 0x200
-#define SDEBUG_OPT_ALL_TSF 0x400
+#define SDEBUG_OPT_ALL_TSF 0x400 /* ignore */
#define SDEBUG_OPT_RARE_TSF 0x800
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
@@ -196,13 +197,14 @@ static const char *sdebug_version_date = "20200710";
* priority. The UA numbers should be a sequence starting from 0 with
* SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
-#define SDEBUG_UA_BUS_RESET 1
-#define SDEBUG_UA_MODE_CHANGED 2
-#define SDEBUG_UA_CAPACITY_CHANGED 3
-#define SDEBUG_UA_LUNS_CHANGED 4
-#define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
-#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
-#define SDEBUG_NUM_UAS 7
+#define SDEBUG_UA_POOCCUR 1 /* Power on occurred */
+#define SDEBUG_UA_BUS_RESET 2
+#define SDEBUG_UA_MODE_CHANGED 3
+#define SDEBUG_UA_CAPACITY_CHANGED 4
+#define SDEBUG_UA_LUNS_CHANGED 5
+#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
+#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
+#define SDEBUG_NUM_UAS 8
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
@@ -250,9 +252,11 @@ static const char *sdebug_version_date = "20200710";
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
- ZBC_ZONE_TYPE_CNV = 0x1,
- ZBC_ZONE_TYPE_SWR = 0x2,
- ZBC_ZONE_TYPE_SWP = 0x3,
+ ZBC_ZTYPE_CNV = 0x1,
+ ZBC_ZTYPE_SWR = 0x2,
+ ZBC_ZTYPE_SWP = 0x3,
+ /* ZBC_ZTYPE_SOBR = 0x4, */
+ ZBC_ZTYPE_GAP = 0x5,
};
/* enumeration names taken from table 26, zbcr05 */
@@ -290,10 +294,12 @@ struct sdebug_dev_info {
/* For ZBC devices */
enum blk_zoned_model zmodel;
+ unsigned int zcap;
unsigned int zsize;
unsigned int zsize_shift;
unsigned int nr_zones;
unsigned int nr_conv_zones;
+ unsigned int nr_seq_zones;
unsigned int nr_imp_open;
unsigned int nr_exp_open;
unsigned int nr_closed;
@@ -781,6 +787,7 @@ static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
+static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
@@ -827,6 +834,7 @@ static int dif_errors;
/* ZBC global data */
static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
+static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
@@ -857,7 +865,7 @@ static const int illegal_condition_result =
(DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
static const int device_qfull_result =
- (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
+ (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
static const int condition_met_result = SAM_STAT_CONDITION_MET;
@@ -1082,6 +1090,12 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (sdebug_verbose)
cp = "power on reset";
break;
+ case SDEBUG_UA_POOCCUR:
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
+ POWER_ON_OCCURRED_ASCQ);
+ if (sdebug_verbose)
+ cp = "power on occurred";
+ break;
case SDEBUG_UA_BUS_RESET:
mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
BUS_RESET_ASCQ);
@@ -1551,6 +1565,12 @@ static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
put_unaligned_be32(devip->max_open, &arr[12]);
else
put_unaligned_be32(0xffffffff, &arr[12]);
+ if (devip->zcap < devip->zsize) {
+ arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
+ put_unaligned_be64(devip->zsize, &arr[20]);
+ } else {
+ arr[19] = 0;
+ }
return 0x3c;
}
@@ -1879,6 +1899,13 @@ static int resp_readcap16(struct scsi_cmnd *scp,
arr[14] |= 0x40;
}
+ /*
+ * Since the scsi_debug READ CAPACITY implementation always reports the
+ * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
+ */
+ if (devip->zmodel == BLK_ZONED_HM)
+ arr[12] |= 1 << 4;
+
arr[15] = sdebug_lowest_aligned & 0xff;
if (have_dif_prot) {
@@ -2581,6 +2608,18 @@ static int resp_ie_l_pg(unsigned char *arr)
return sizeof(ie_l_pg);
}
+static int resp_env_rep_l_spg(unsigned char *arr)
+{
+ unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
+ 0x0, 40, 72, 0xff, 45, 18, 0, 0,
+ 0x1, 0x0, 0x23, 0x8,
+ 0x0, 55, 72, 35, 55, 45, 0, 0,
+ };
+
+ memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
+ return sizeof(env_rep_l_spg);
+}
+
#define SDEBUG_MAX_LSENSE_SZ 512
static int resp_log_sense(struct scsi_cmnd *scp,
@@ -2633,26 +2672,47 @@ static int resp_log_sense(struct scsi_cmnd *scp,
arr[n++] = 0xff; /* this page */
arr[n++] = 0xd;
arr[n++] = 0x0; /* Temperature */
+ arr[n++] = 0xd;
+ arr[n++] = 0x1; /* Environment reporting */
+ arr[n++] = 0xd;
+ arr[n++] = 0xff; /* all 0xd subpages */
arr[n++] = 0x2f;
arr[n++] = 0x0; /* Informational exceptions */
+ arr[n++] = 0x2f;
+ arr[n++] = 0xff; /* all 0x2f subpages */
arr[3] = n - 4;
break;
case 0xd: /* Temperature subpages */
n = 4;
arr[n++] = 0xd;
arr[n++] = 0x0; /* Temperature */
+ arr[n++] = 0xd;
+ arr[n++] = 0x1; /* Environment reporting */
+ arr[n++] = 0xd;
+ arr[n++] = 0xff; /* these subpages */
arr[3] = n - 4;
break;
case 0x2f: /* Informational exceptions subpages */
n = 4;
arr[n++] = 0x2f;
arr[n++] = 0x0; /* Informational exceptions */
+ arr[n++] = 0x2f;
+ arr[n++] = 0xff; /* these subpages */
arr[3] = n - 4;
break;
default:
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
return check_condition_result;
}
+ } else if (subpcode > 0) {
+ arr[0] |= 0x40;
+ arr[1] = subpcode;
+ if (pcode == 0xd && subpcode == 1)
+ arr[3] = resp_env_rep_l_spg(arr + 4);
+ else {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+ }
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
@@ -2670,12 +2730,38 @@ static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
unsigned long long lba)
{
- return &devip->zstate[lba >> devip->zsize_shift];
+ u32 zno = lba >> devip->zsize_shift;
+ struct sdeb_zone_state *zsp;
+
+ if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
+ return &devip->zstate[zno];
+
+ /*
+ * If the zone capacity is less than the zone size, adjust for gap
+ * zones.
+ */
+ zno = 2 * zno - devip->nr_conv_zones;
+ WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
+ zsp = &devip->zstate[zno];
+ if (lba >= zsp->z_start + zsp->z_size)
+ zsp++;
+ WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
+ return zsp;
}
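When the zone capacity is smaller than the zone size, scsi_debug stores each sequential zone as two zstate[] entries (the sequential zone followed by its gap zone), while conventional zones keep one entry each; hence the index math 2 * zno - nr_conv_zones. A worked sketch: with nr_conv_zones = 2, an LBA in logical zone 5 lands in slot 2*5 - 2 = 8, and if it lies beyond that slot's capacity the zsp++ step selects the gap in slot 9.

    /* map a logical zone number (LBA >> zsize_shift) to a zstate[] slot */
    static unsigned int zone_slot(unsigned int zno, unsigned int nr_conv)
    {
        if (zno < nr_conv)
            return zno;                 /* conventional zones: 1 slot each */
        return 2 * zno - nr_conv;       /* sequential zone; gap follows it */
    }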
static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{
- return zsp->z_type == ZBC_ZONE_TYPE_CNV;
+ return zsp->z_type == ZBC_ZTYPE_CNV;
+}
+
+static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
+{
+ return zsp->z_type == ZBC_ZTYPE_GAP;
+}
+
+static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
+{
+ return !zbc_zone_is_conv(zsp) && !zbc_zone_is_gap(zsp);
}
static void zbc_close_zone(struct sdebug_dev_info *devip,
@@ -2683,7 +2769,7 @@ static void zbc_close_zone(struct sdebug_dev_info *devip,
{
enum sdebug_z_cond zc;
- if (zbc_zone_is_conv(zsp))
+ if (!zbc_zone_is_seq(zsp))
return;
zc = zsp->z_cond;
@@ -2721,7 +2807,7 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
{
enum sdebug_z_cond zc;
- if (zbc_zone_is_conv(zsp))
+ if (!zbc_zone_is_seq(zsp))
return;
zc = zsp->z_cond;
@@ -2747,19 +2833,37 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
}
}
+static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
+ struct sdeb_zone_state *zsp)
+{
+ switch (zsp->z_cond) {
+ case ZC2_IMPLICIT_OPEN:
+ devip->nr_imp_open--;
+ break;
+ case ZC3_EXPLICIT_OPEN:
+ devip->nr_exp_open--;
+ break;
+ default:
+ WARN_ONCE(true, "Invalid zone %llu condition %x\n",
+ zsp->z_start, zsp->z_cond);
+ break;
+ }
+ zsp->z_cond = ZC5_FULL;
+}
+
static void zbc_inc_wp(struct sdebug_dev_info *devip,
unsigned long long lba, unsigned int num)
{
struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
- if (zbc_zone_is_conv(zsp))
+ if (!zbc_zone_is_seq(zsp))
return;
- if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ if (zsp->z_type == ZBC_ZTYPE_SWR) {
zsp->z_wp += num;
if (zsp->z_wp >= zend)
- zsp->z_cond = ZC5_FULL;
+ zbc_set_zone_full(devip, zsp);
return;
}
@@ -2778,7 +2882,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
n = num;
}
if (zsp->z_wp >= zend)
- zsp->z_cond = ZC5_FULL;
+ zbc_set_zone_full(devip, zsp);
num -= n;
lba += n;
@@ -2801,9 +2905,7 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
if (devip->zmodel == BLK_ZONED_HA)
return 0;
/* For host-managed, reads cannot cross zone types boundaries */
- if (zsp_end != zsp &&
- zbc_zone_is_conv(zsp) &&
- !zbc_zone_is_conv(zsp_end)) {
+ if (zsp->z_type != zsp_end->z_type) {
mk_sense_buffer(scp, ILLEGAL_REQUEST,
LBA_OUT_OF_RANGE,
READ_INVDATA_ASCQ);
@@ -2812,6 +2914,13 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
return 0;
}
+ /* Writing into a gap zone is not allowed */
+ if (zbc_zone_is_gap(zsp)) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
+ ATTEMPT_ACCESS_GAP);
+ return check_condition_result;
+ }
+
/* No restrictions for writes within conventional zones */
if (zbc_zone_is_conv(zsp)) {
if (!zbc_zone_is_conv(zsp_end)) {
@@ -2823,7 +2932,7 @@ static int check_zbc_access_params(struct scsi_cmnd *scp,
return 0;
}
- if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
+ if (zsp->z_type == ZBC_ZTYPE_SWR) {
/* Writes cannot cross sequential zone boundaries */
if (zsp_end != zsp) {
mk_sense_buffer(scp, ILLEGAL_REQUEST,
@@ -3118,6 +3227,70 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
return ret;
}
+static inline void
+sdeb_read_lock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __acquire(&sip->macc_lck);
+ else
+ __acquire(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ read_lock(&sip->macc_lck);
+ else
+ read_lock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_read_unlock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __release(&sip->macc_lck);
+ else
+ __release(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ read_unlock(&sip->macc_lck);
+ else
+ read_unlock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_write_lock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __acquire(&sip->macc_lck);
+ else
+ __acquire(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ write_lock(&sip->macc_lck);
+ else
+ write_lock(&sdeb_fake_rw_lck);
+ }
+}
+
+static inline void
+sdeb_write_unlock(struct sdeb_store_info *sip)
+{
+ if (sdebug_no_rwlock) {
+ if (sip)
+ __release(&sip->macc_lck);
+ else
+ __release(&sdeb_fake_rw_lck);
+ } else {
+ if (sip)
+ write_unlock(&sip->macc_lck);
+ else
+ write_unlock(&sdeb_fake_rw_lck);
+ }
+}
+
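The __acquire()/__release() calls above are sparse context annotations: they generate no code, but keep sparse's lock-balance checking consistent on the path that skips the real rwlock when sdebug_no_rwlock is set. The minimal shape of such a helper, with hypothetical names:

    #include <linux/spinlock.h>

    static bool locking_disabled;       /* analogous to sdebug_no_rwlock */

    static void my_read_lock(rwlock_t *lck)
    {
        if (locking_disabled)
            __acquire(lck);             /* annotation only: no code emitted */
        else
            read_lock(lck);
    }

    static void my_read_unlock(rwlock_t *lck)
    {
        if (locking_disabled)
            __release(lck);
        else
            read_unlock(lck);
    }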
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -3126,7 +3299,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
int ret;
u64 lba;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
u8 *cmd = scp->cmnd;
switch (cmd[0]) {
@@ -3205,29 +3377,29 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
- read_lock(macc_lckp);
+ sdeb_read_lock(sip);
/* DIX + T10 DIF */
if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
switch (prot_verify_read(scp, lba, num, ei_lba)) {
case 1: /* Guard tag error */
if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
return check_condition_result;
} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
return illegal_condition_result;
}
break;
case 3: /* Reference tag error */
if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
return check_condition_result;
} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
return illegal_condition_result;
}
@@ -3236,7 +3408,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
ret = do_device_access(sip, scp, 0, lba, num, false);
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -3424,7 +3596,6 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
int ret;
u64 lba;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u8 *cmd = scp->cmnd;
switch (cmd[0]) {
@@ -3479,10 +3650,10 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
"to DIF device\n");
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
ret = check_device_access_params(scp, lba, num, true);
if (ret) {
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return ret;
}
@@ -3491,22 +3662,22 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
switch (prot_verify_write(scp, lba, num, ei_lba)) {
case 1: /* Guard tag error */
if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
return illegal_condition_result;
} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
return check_condition_result;
}
break;
case 3: /* Reference tag error */
if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
return illegal_condition_result;
} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
return check_condition_result;
}
@@ -3520,7 +3691,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
if (unlikely(-1 == ret))
return DID_ERROR << 16;
else if (unlikely(sdebug_verbose &&
@@ -3560,7 +3731,6 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u8 *lrdp = NULL;
u8 *up;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u8 wrprotect;
u16 lbdof, num_lrd, k;
u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
@@ -3628,7 +3798,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
goto err_out;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
sg_off = lbdof_blen;
/* Spec says Buffer xfer Length field in number of LBs in dout */
cum_lb = 0;
@@ -3710,7 +3880,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
ret = 0;
err_out_unlock:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
err_out:
kfree(lrdp);
return ret;
@@ -3727,15 +3897,14 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
int ret;
struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
scp->device->hostdata, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u8 *fs1p;
u8 *fsp;
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
ret = check_device_access_params(scp, lba, num, true);
if (ret) {
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return ret;
}
@@ -3755,7 +3924,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
- write_unlock(&sip->macc_lck);
+ sdeb_write_unlock(sip);
return DID_ERROR << 16;
} else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
@@ -3774,7 +3943,7 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
out:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return 0;
}
@@ -3887,7 +4056,6 @@ static int resp_comp_write(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
u8 *arr;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u64 lba;
u32 dnum;
u32 lb_size = sdebug_sector_size;
@@ -3920,7 +4088,7 @@ static int resp_comp_write(struct scsi_cmnd *scp,
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
ret = do_dout_fetch(scp, dnum, arr);
if (ret == -1) {
@@ -3938,7 +4106,7 @@ static int resp_comp_write(struct scsi_cmnd *scp,
if (scsi_debug_lbp())
map_region(sip, lba, num);
cleanup:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
kfree(arr);
return retval;
}
@@ -3954,7 +4122,6 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char *buf;
struct unmap_block_desc *desc;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
unsigned int i, payload_len, descriptors;
int ret;
@@ -3983,7 +4150,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
desc = (void *)&buf[8];
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
for (i = 0 ; i < descriptors ; i++) {
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
@@ -3999,7 +4166,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ret = 0;
out:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
kfree(buf);
return ret;
@@ -4091,7 +4258,6 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
u32 nblks;
u8 *cmd = scp->cmnd;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
u8 *fsp = sip->storep;
if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
@@ -4113,12 +4279,12 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
rest = block + nblks - sdebug_store_sectors;
/* Try to bring the PRE-FETCH range into CPU's cache */
- read_lock(macc_lckp);
+ sdeb_read_lock(sip);
prefetch_range(fsp + (sdebug_sector_size * block),
(nblks - rest) * sdebug_sector_size);
if (rest)
prefetch_range(fsp, rest * sdebug_sector_size);
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
fini:
if (cmd[1] & 0x2)
res = SDEG_RES_IMMED_MASK;
@@ -4239,7 +4405,6 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *arr;
u8 *cmd = scp->cmnd;
struct sdeb_store_info *sip = devip2sip(devip, true);
- rwlock_t *macc_lckp = &sip->macc_lck;
bytchk = (cmd[1] >> 1) & 0x3;
if (bytchk == 0) {
@@ -4278,7 +4443,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
/* Not changing store, so only need read access */
- read_lock(macc_lckp);
+ sdeb_read_lock(sip);
ret = do_dout_fetch(scp, a_num, arr);
if (ret == -1) {
@@ -4300,27 +4465,26 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
goto cleanup;
}
cleanup:
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
kfree(arr);
return ret;
}
#define RZONES_DESC_HD 64
-/* Report zones depending on start LBA nad reporting options */
+/* Report zones depending on start LBA and reporting options */
static int resp_report_zones(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
- unsigned int i, max_zones, rep_max_zones, nrz = 0;
+ unsigned int rep_max_zones, nrz = 0;
int ret = 0;
u32 alloc_len, rep_opts, rep_len;
bool partial;
u64 lba, zs_lba;
u8 *arr = NULL, *desc;
u8 *cmd = scp->cmnd;
- struct sdeb_zone_state *zsp;
+ struct sdeb_zone_state *zsp = NULL;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
@@ -4338,9 +4502,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
return check_condition_result;
}
- max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
- rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
- max_zones);
+ rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
arr = kzalloc(alloc_len, GFP_ATOMIC);
if (!arr) {
@@ -4349,12 +4511,12 @@ static int resp_report_zones(struct scsi_cmnd *scp,
return check_condition_result;
}
- read_lock(macc_lckp);
+ sdeb_read_lock(sip);
desc = arr + 64;
- for (i = 0; i < max_zones; i++) {
- lba = zs_lba + devip->zsize * i;
- if (lba > sdebug_capacity)
+ for (lba = zs_lba; lba < sdebug_capacity;
+ lba = zsp->z_start + zsp->z_size) {
+ if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
break;
zsp = zbc_zone(devip, lba);
switch (rep_opts) {
@@ -4399,9 +4561,14 @@ static int resp_report_zones(struct scsi_cmnd *scp,
if (!zsp->z_non_seq_resource)
continue;
break;
+ case 0x3e:
+ /* All zones except gap zones. */
+ if (zbc_zone_is_gap(zsp))
+ continue;
+ break;
case 0x3f:
/* Not write pointer (conventional) zones */
- if (!zbc_zone_is_conv(zsp))
+ if (zbc_zone_is_seq(zsp))
continue;
break;
default:
@@ -4430,14 +4597,19 @@ static int resp_report_zones(struct scsi_cmnd *scp,
}
/* Report header */
+ /* Zone list length. */
put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
+ /* Maximum LBA */
put_unaligned_be64(sdebug_capacity - 1, arr + 8);
+ /* Zone starting LBA granularity. */
+ if (devip->zcap < devip->zsize)
+ put_unaligned_be64(devip->zsize, arr + 16);
rep_len = (unsigned long)desc - (unsigned long)arr;
ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
fini:
- read_unlock(macc_lckp);
+ sdeb_read_unlock(sip);
kfree(arr);
return ret;
}
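
The zone-type predicates used by the new 0x3e case and the reworked 0x3f case are defined outside these hunks. Assuming the ZBC_ZTYPE_* names that appear later in this diff, they plausibly reduce to:

static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_GAP;
}

static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
	return zsp->z_type == ZBC_ZTYPE_SWR ||
	       zsp->z_type == ZBC_ZTYPE_SWP;
}

The 0x3f rewrite matters once gap zones exist: a gap zone is neither conventional nor sequential, so skipping via zbc_zone_is_seq() keeps both conventional and gap zones in a "not write pointer" report, whereas the old !zbc_zone_is_conv() test would have dropped the gap zones.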
@@ -4463,14 +4635,13 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
struct sdeb_zone_state *zsp;
bool all = cmd[14] & 0x01;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
if (all) {
/* Check if all closed zones can be open */
@@ -4519,7 +4690,7 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
zbc_open_zone(devip, zsp, true);
fini:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return res;
}
@@ -4540,14 +4711,13 @@ static int resp_close_zone(struct scsi_cmnd *scp,
struct sdeb_zone_state *zsp;
bool all = cmd[14] & 0x01;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
if (all) {
zbc_close_all(devip);
@@ -4576,7 +4746,7 @@ static int resp_close_zone(struct scsi_cmnd *scp,
zbc_close_zone(devip, zsp);
fini:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return res;
}
@@ -4613,14 +4783,13 @@ static int resp_finish_zone(struct scsi_cmnd *scp,
u8 *cmd = scp->cmnd;
bool all = cmd[14] & 0x01;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
if (all) {
zbc_finish_all(devip);
@@ -4649,7 +4818,7 @@ static int resp_finish_zone(struct scsi_cmnd *scp,
zbc_finish_zone(devip, zsp, true);
fini:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return res;
}
@@ -4659,7 +4828,7 @@ static void zbc_rwp_zone(struct sdebug_dev_info *devip,
enum sdebug_z_cond zc;
struct sdeb_store_info *sip = devip2sip(devip, false);
- if (zbc_zone_is_conv(zsp))
+ if (!zbc_zone_is_seq(zsp))
return;
zc = zsp->z_cond;
@@ -4694,14 +4863,13 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool all = cmd[14] & 0x01;
struct sdeb_store_info *sip = devip2sip(devip, false);
- rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
if (!sdebug_dev_is_zoned(devip)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- write_lock(macc_lckp);
+ sdeb_write_lock(sip);
if (all) {
zbc_rwp_all(devip);
@@ -4729,7 +4897,7 @@ static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
zbc_rwp_zone(devip, zsp);
fini:
- write_unlock(macc_lckp);
+ sdeb_write_unlock(sip);
return res;
}
@@ -4778,7 +4946,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
return;
}
spin_lock_irqsave(&sqp->qc_lock, iflags);
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
sqcp = &sqp->qc_arr[qc_idx];
scp = sqcp->a_cmnd;
if (unlikely(scp == NULL)) {
@@ -4850,6 +5018,7 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{
struct sdeb_zone_state *zsp;
sector_t capacity = get_sdebug_capacity();
+ sector_t conv_capacity;
sector_t zstart = 0;
unsigned int i;
@@ -4884,11 +5053,30 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
devip->zsize_shift = ilog2(devip->zsize);
devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
- if (sdeb_zbc_nr_conv >= devip->nr_zones) {
+ if (sdeb_zbc_zone_cap_mb == 0) {
+ devip->zcap = devip->zsize;
+ } else {
+ devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
+ ilog2(sdebug_sector_size);
+ if (devip->zcap > devip->zsize) {
+ pr_err("Zone capacity too large\n");
+ return -EINVAL;
+ }
+ }
+
+ conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
+ if (conv_capacity >= capacity) {
pr_err("Number of conventional zones too large\n");
return -EINVAL;
}
devip->nr_conv_zones = sdeb_zbc_nr_conv;
+ devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
+ devip->zsize_shift;
+ devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
+
+ /* Add gap zones if zone capacity is smaller than the zone size */
+ if (devip->zcap < devip->zsize)
+ devip->nr_zones += devip->nr_seq_zones;
if (devip->zmodel == BLK_ZONED_HM) {
/* zbc_max_open_zones can be 0, meaning "not reported" */
@@ -4909,23 +5097,29 @@ static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
zsp->z_start = zstart;
if (i < devip->nr_conv_zones) {
- zsp->z_type = ZBC_ZONE_TYPE_CNV;
+ zsp->z_type = ZBC_ZTYPE_CNV;
zsp->z_cond = ZBC_NOT_WRITE_POINTER;
zsp->z_wp = (sector_t)-1;
- } else {
+ zsp->z_size =
+ min_t(u64, devip->zsize, capacity - zstart);
+ } else if ((zstart & (devip->zsize - 1)) == 0) {
if (devip->zmodel == BLK_ZONED_HM)
- zsp->z_type = ZBC_ZONE_TYPE_SWR;
+ zsp->z_type = ZBC_ZTYPE_SWR;
else
- zsp->z_type = ZBC_ZONE_TYPE_SWP;
+ zsp->z_type = ZBC_ZTYPE_SWP;
zsp->z_cond = ZC1_EMPTY;
zsp->z_wp = zsp->z_start;
+ zsp->z_size =
+ min_t(u64, devip->zcap, capacity - zstart);
+ } else {
+ zsp->z_type = ZBC_ZTYPE_GAP;
+ zsp->z_cond = ZBC_NOT_WRITE_POINTER;
+ zsp->z_wp = (sector_t)-1;
+ zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
+ capacity - zstart);
}
- if (zsp->z_start + devip->zsize < capacity)
- zsp->z_size = devip->zsize;
- else
- zsp->z_size = capacity - zsp->z_start;
-
+ WARN_ON_ONCE((int)zsp->z_size <= 0);
zstart += zsp->z_size;
}
@@ -5003,7 +5197,7 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
open_devip->lun = sdev->lun;
open_devip->sdbg_host = sdbg_host;
atomic_set(&open_devip->num_in_q, 0);
- set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
+ set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
open_devip->used = true;
return open_devip;
}
@@ -5095,8 +5289,8 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
if (sd_dp) {
- l_defer_t = sd_dp->defer_t;
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
} else
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
@@ -5135,8 +5329,8 @@ static void stop_all_queued(void)
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
if (sd_dp) {
- l_defer_t = sd_dp->defer_t;
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
} else
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
@@ -5448,18 +5642,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (scsi_result)
goto respond_in_thread;
- else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
- scsi_result = device_qfull_result;
+ scsi_result = device_qfull_result;
if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
- sdev_printk(KERN_INFO, sdp,
- "%s: max_queue=%d exceeded, %s\n",
- __func__, sdebug_max_queue,
- (scsi_result ? "status: TASK SET FULL" :
- "report: host busy"));
- if (scsi_result)
- goto respond_in_thread;
- else
- return SCSI_MLQUEUE_HOST_BUSY;
+ sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
+ __func__, sdebug_max_queue);
+ goto respond_in_thread;
}
set_bit(k, sqp->in_use_bm);
atomic_inc(&devip->num_in_q);
@@ -5554,7 +5741,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
}
- sd_dp->defer_t = SDEB_DEFER_POLL;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
} else {
if (!sd_dp->init_hrt) {
@@ -5566,7 +5753,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
}
- sd_dp->defer_t = SDEB_DEFER_HRT;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
/* schedule the invocation of scsi_done() for a later time */
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
}
@@ -5585,7 +5772,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
}
- sd_dp->defer_t = SDEB_DEFER_POLL;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
} else {
if (!sd_dp->init_wq) {
@@ -5595,7 +5782,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp->qc_idx = k;
INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
}
- sd_dp->defer_t = SDEB_DEFER_WQ;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
}
if (sdebug_statistics)
@@ -5662,6 +5849,7 @@ module_param_named(medium_error_start, sdebug_medium_error_start, int,
S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
@@ -5693,6 +5881,7 @@ module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
+module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
@@ -5734,6 +5923,7 @@ MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIU
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
+MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
@@ -5763,6 +5953,7 @@ MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique de
MODULE_PARM_DESC(wp, "Write Protect (def=0)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
+MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
@@ -6296,6 +6487,23 @@ static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
}
+static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
+}
+
+static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ bool v;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdebug_no_rwlock = v;
+ return count;
+}
+static DRIVER_ATTR_RW(no_rwlock);
+
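Besides the module parameter, this attribute makes no_rwlock switchable at runtime through the driver's sysfs directory (for scsi_debug that is /sys/bus/pseudo/drivers/scsi_debug/no_rwlock); kstrtobool() accepts the usual 0/1, y/n and on/off spellings.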
/*
* Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
* in range [0, sdebug_host_max_queue), we can't change it.
@@ -6656,6 +6864,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_lun_format.attr,
&driver_attr_max_luns.attr,
&driver_attr_max_queue.attr,
+ &driver_attr_no_rwlock.attr,
&driver_attr_no_uld.attr,
&driver_attr_scsi_level.attr,
&driver_attr_virtual_gb.attr,
@@ -7272,12 +7481,12 @@ static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
-static int sdebug_map_queues(struct Scsi_Host *shost)
+static void sdebug_map_queues(struct Scsi_Host *shost)
{
int i, qoff;
if (shost->nr_hw_queues == 1)
- return 0;
+ return;
for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -7299,9 +7508,6 @@ static int sdebug_map_queues(struct Scsi_Host *shost)
qoff += map->nr_queues;
}
-
- return 0;
-
}
static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
@@ -7319,16 +7525,22 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct sdebug_defer *sd_dp;
sqp = sdebug_q_arr + queue_num;
+
spin_lock_irqsave(&sqp->qc_lock, iflags);
+ qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (qc_idx >= sdebug_max_queue)
+ goto unlock;
+
for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
if (first) {
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
first = false;
+ if (!test_bit(qc_idx, sqp->in_use_bm))
+ continue;
} else {
qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
}
- if (unlikely(qc_idx >= sdebug_max_queue))
+ if (qc_idx >= sdebug_max_queue)
break;
sqcp = &sqp->qc_arr[qc_idx];
@@ -7341,7 +7553,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
queue_num, qc_idx, __func__);
break;
}
- if (sd_dp->defer_t == SDEB_DEFER_POLL) {
+ if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
if (kt_from_boot < sd_dp->cmpl_ts)
continue;
@@ -7375,13 +7587,18 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
else
atomic_set(&retired_max_queue, k + 1);
}
- sd_dp->defer_t = SDEB_DEFER_NONE;
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
scsi_done(scp); /* callback to mid level */
- spin_lock_irqsave(&sqp->qc_lock, iflags);
num_entries++;
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
+ break;
}
+
+unlock:
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+
if (num_entries > 0)
atomic_add(num_entries, &sdeb_mq_poll_count);
return num_entries;
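
The defer_t conversions to READ_ONCE()/WRITE_ONCE() across the driver pair up with this poll rework: at least one store to the field appears to happen outside qc_lock (the hrtimer arming path in schedule_resp() above), so the marked accesses keep the compiler from tearing, caching or refetching the value. The essential idiom, with names reused from the hunks above:

/* producer: publish the deferral type, then arm the completion */
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);

/* consumer (poll loop): sample once and act on the snapshot */
if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
	if (kt_from_boot < sd_dp->cmpl_ts)
		continue;	/* queued for poll but not due yet */
	/* ... complete the command ... */
}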
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index d9109771f274..217b70c678c3 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -9,6 +9,7 @@
static const char *const scsi_cmd_flags[] = {
SCSI_CMD_FLAG_NAME(TAGGED),
SCSI_CMD_FLAG_NAME(INITIALIZED),
+ SCSI_CMD_FLAG_NAME(LAST),
};
#undef SCSI_CMD_FLAG_NAME
@@ -32,14 +33,12 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags,
void scsi_show_rq(struct seq_file *m, struct request *rq)
{
- struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
int alloc_ms = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
int timeout_ms = jiffies_to_msecs(rq->timeout);
- const u8 *const cdb = READ_ONCE(cmd->cmnd);
char buf[80] = "(?)";
- if (cdb)
- __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
+ __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
seq_printf(m, ", .cmd=%s, .retries=%d, .result = %#x, .flags=", buf,
cmd->retries, cmd->result);
scsi_flags_show(m, cmd->flags, scsi_cmd_flags,
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2371edbc3af4..6995c8979230 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -133,30 +133,13 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
return true;
}
-static void scsi_eh_complete_abort(struct scsi_cmnd *scmd, struct Scsi_Host *shost)
-{
- unsigned long flags;
-
- spin_lock_irqsave(shost->host_lock, flags);
- list_del_init(&scmd->eh_entry);
- /*
- * If the abort succeeds, and there is no further
- * EH action, clear the ->last_reset time.
- */
- if (list_empty(&shost->eh_abort_list) &&
- list_empty(&shost->eh_cmd_q))
- if (shost->eh_deadline != -1)
- shost->last_reset = 0;
- spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
*
* Note: this function must be called only for a command that has timed out.
* Because the block layer marks a request as complete before it calls
- * scsi_times_out(), a .scsi_done() call from the LLD for a command that has
+ * scsi_timeout(), a .scsi_done() call from the LLD for a command that has
 * timed out does not have any effect. Hence it is safe to call
* scsi_finish_command() from this function.
*/
@@ -166,54 +149,72 @@ scmd_eh_abort_handler(struct work_struct *work)
struct scsi_cmnd *scmd =
container_of(work, struct scsi_cmnd, abort_work.work);
struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
enum scsi_disposition rtn;
unsigned long flags;
- if (scsi_host_eh_past_deadline(sdev->host)) {
+ if (scsi_host_eh_past_deadline(shost)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"eh timeout, not aborting\n"));
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
+ goto out;
+ }
+
+ SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"aborting command\n"));
- rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
- if (rtn == SUCCESS) {
- set_host_byte(scmd, DID_TIME_OUT);
- if (scsi_host_eh_past_deadline(sdev->host)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "eh timeout, not retrying "
- "aborted command\n"));
- } else if (!scsi_noretry_cmd(scmd) &&
- scsi_cmd_retry_allowed(scmd) &&
- scsi_eh_should_retry_cmd(scmd)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "retry aborted command\n"));
- scsi_eh_complete_abort(scmd, sdev->host);
- scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
- return;
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "finish aborted command\n"));
- scsi_eh_complete_abort(scmd, sdev->host);
- scsi_finish_command(scmd);
- return;
- }
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "cmd abort %s\n",
- (rtn == FAST_IO_FAIL) ?
- "not send" : "failed"));
- }
+ rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
+ if (rtn != SUCCESS) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "cmd abort %s\n",
+ (rtn == FAST_IO_FAIL) ?
+ "not send" : "failed"));
+ goto out;
+ }
+ set_host_byte(scmd, DID_TIME_OUT);
+ if (scsi_host_eh_past_deadline(shost)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "eh timeout, not retrying "
+ "aborted command\n"));
+ goto out;
}
- spin_lock_irqsave(sdev->host->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
list_del_init(&scmd->eh_entry);
- spin_unlock_irqrestore(sdev->host->host_lock, flags);
+
+ /*
+ * If the abort succeeds, and there is no further
+ * EH action, clear the ->last_reset time.
+ */
+ if (list_empty(&shost->eh_abort_list) &&
+ list_empty(&shost->eh_cmd_q))
+ if (shost->eh_deadline != -1)
+ shost->last_reset = 0;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (!scsi_noretry_cmd(scmd) &&
+ scsi_cmd_retry_allowed(scmd) &&
+ scsi_eh_should_retry_cmd(scmd)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "retry aborted command\n"));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "finish aborted command\n"));
+ scsi_finish_command(scmd);
+ }
+ return;
+
+out:
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del_init(&scmd->eh_entry);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
scsi_eh_scmd_add(scmd);
}
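
The net effect of this rewrite is early-exit style in place of three levels of nesting, with the old scsi_eh_complete_abort() helper inlined into the success path. In outline:

/* resulting control flow (sketch):
 *
 *	if (past eh deadline)                goto out;
 *	if (abort attempt != SUCCESS)        goto out;
 *	set_host_byte(scmd, DID_TIME_OUT);
 *	if (past eh deadline)                goto out;
 *	unlink scmd, maybe clear ->last_reset, under host_lock;
 *	retry or finish the command;         return;
 * out:
 *	unlink scmd under host_lock;
 *	scsi_eh_scmd_add(scmd);              // escalate to full EH
 */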
@@ -315,7 +316,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
}
/**
- * scsi_times_out - Timeout function for normal scsi commands.
+ * scsi_timeout - Timeout function for normal scsi commands.
* @req: request that is timing out.
*
* Notes:
@@ -324,7 +325,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
-enum blk_eh_timer_return scsi_times_out(struct request *req)
+enum blk_eh_timer_return scsi_timeout(struct request *req)
{
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
enum blk_eh_timer_return rtn = BLK_EH_DONE;
@@ -333,6 +334,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
trace_scsi_dispatch_cmd_timeout(scmd);
scsi_log_completion(scmd, TIMEOUT_ERROR);
+ atomic_inc(&scmd->device->iotmo_cnt);
if (host->eh_deadline != -1 && !host->last_reset)
host->last_reset = jiffies;
@@ -462,14 +464,12 @@ static void scsi_report_sense(struct scsi_device *sdev,
evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
scsi_report_lun_change(sdev);
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
"LUN assignments on this target have "
"changed. The Linux SCSI layer does not "
"automatically remap LUN assignments.\n");
} else if (sshdr->asc == 0x3f)
sdev_printk(KERN_WARNING, sdev,
- "Warning! Received an indication that the "
- "operating parameters on this target have "
+ "Operating parameters on this target have "
"changed. The Linux SCSI layer does not "
"automatically adjust these parameters.\n");
@@ -483,8 +483,13 @@ static void scsi_report_sense(struct scsi_device *sdev,
if (sshdr->asc == 0x29) {
evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
- sdev_printk(KERN_WARNING, sdev,
- "Power-on or device reset occurred\n");
+ /*
+ * Do not print message if it is an expected side-effect
+ * of runtime PM.
+ */
+ if (!sdev->silence_suspend)
+ sdev_printk(KERN_WARNING, sdev,
+ "Power-on or device reset occurred\n");
}
if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
@@ -510,6 +515,11 @@ static void scsi_report_sense(struct scsi_device *sdev,
}
}
+static inline void set_scsi_ml_byte(struct scsi_cmnd *cmd, u8 status)
+{
+ cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
+}
+
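set_scsi_ml_byte() claims bits 8-15 of cmd->result, the byte that historically carried the (long obsolete) SCSI message byte; the matching get_scsi_ml_byte() extractor appears in the scsi_lib.c hunks below. Roughly:

/* cmd->result usage after this series (sketch):
 *   bits  0..7   SAM status byte (SAM_STAT_*)
 *   bits  8..15  scsi-ml byte (SCSIML_STAT_*, internal to the midlayer)
 *   bits 16..23  host byte (DID_*)
 */
set_scsi_ml_byte(scmd, SCSIML_STAT_NOSPC);	/* host and status bytes untouched */

Routing target-side conditions through a midlayer-private byte means the host byte no longer has to be faked with DID_TARGET_FAILURE and friends and reset again later; scsi_result_to_blk_status() can translate without mutating the command.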
/**
* scsi_check_sense - Examine scsi cmd sense
* @scmd: Cmd to have sense checked.
@@ -640,7 +650,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
case DATA_PROTECT:
if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
/* Thin provisioning hard threshold reached */
- set_host_byte(scmd, DID_ALLOC_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_NOSPC);
return SUCCESS;
}
fallthrough;
@@ -648,14 +658,14 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
case VOLUME_OVERFLOW:
case MISCOMPARE:
case BLANK_CHECK:
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
return SUCCESS;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- set_host_byte(scmd, DID_MEDIUM_ERROR);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_MED_ERROR);
return SUCCESS;
}
return NEEDS_RETRY;
@@ -664,7 +674,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
fallthrough;
case ILLEGAL_REQUEST:
@@ -674,7 +684,7 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
sshdr.asc == 0x24 || /* Invalid field in cdb */
sshdr.asc == 0x26 || /* Parameter value invalid */
sshdr.asc == 0x27) { /* Write protected */
- set_host_byte(scmd, DID_TARGET_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
}
return SUCCESS;
@@ -979,7 +989,7 @@ static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
* @scmd: SCSI command structure to hijack
* @ses: structure to save restore information
* @cmnd: CDB to send. Can be NULL if no new cmnd is needed
- * @cmnd_size: size in bytes of @cmnd (must be <= BLK_MAX_CDB)
+ * @cmnd_size: size in bytes of @cmnd (must be <= MAX_COMMAND_SIZE)
* @sense_bytes: size of sense data to copy. or 0 (if != 0 @cmnd is ignored)
*
* This function is used to save a scsi command information before re-execution
@@ -1001,22 +1011,21 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
* command.
*/
ses->cmd_len = scmd->cmd_len;
- ses->cmnd = scmd->cmnd;
ses->data_direction = scmd->sc_data_direction;
ses->sdb = scmd->sdb;
ses->result = scmd->result;
- ses->resid_len = scmd->req.resid_len;
+ ses->resid_len = scmd->resid_len;
ses->underflow = scmd->underflow;
ses->prot_op = scmd->prot_op;
ses->eh_eflags = scmd->eh_eflags;
scmd->prot_op = SCSI_PROT_NORMAL;
scmd->eh_eflags = 0;
- scmd->cmnd = ses->eh_cmnd;
- memset(scmd->cmnd, 0, BLK_MAX_CDB);
+ memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd));
+ memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->result = 0;
- scmd->req.resid_len = 0;
+ scmd->resid_len = 0;
if (sense_bytes) {
scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1032,7 +1041,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
} else {
scmd->sc_data_direction = DMA_NONE;
if (cmnd) {
- BUG_ON(cmnd_size > BLK_MAX_CDB);
+ BUG_ON(cmnd_size > sizeof(scmd->cmnd));
memcpy(scmd->cmnd, cmnd, cmnd_size);
scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
}
@@ -1065,11 +1074,11 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
* Restore original data
*/
scmd->cmd_len = ses->cmd_len;
- scmd->cmnd = ses->cmnd;
+ memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd));
scmd->sc_data_direction = ses->data_direction;
scmd->sdb = ses->sdb;
scmd->result = ses->result;
- scmd->req.resid_len = ses->resid_len;
+ scmd->resid_len = ses->resid_len;
scmd->underflow = ses->underflow;
scmd->prot_op = ses->prot_op;
scmd->eh_eflags = ses->eh_eflags;
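
With scmd->cmnd now a fixed array embedded in the command instead of a pointer into the request, save/restore has to copy bytes both ways; the old pointer swap (ses->cmnd = scmd->cmnd, then pointing scmd->cmnd at a scratch buffer) no longer means anything. Assuming struct scsi_eh_save's cmnd member correspondingly became a fixed array, the pair is symmetric:

/* prep: stash the original CDB, hand the EH code a zeroed one */
memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd));
memset(scmd->cmnd, 0, sizeof(scmd->cmnd));

/* ... send the EH command ... */

/* restore: put the original CDB back */
memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd));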
@@ -1429,7 +1438,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
enum scsi_disposition rtn = NEEDS_RETRY;
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
- rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
+ rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
+ scmd->device->eh_timeout, 0);
if (rtn == SUCCESS)
return 0;
@@ -1773,7 +1783,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
* scsi_noretry_cmd - determine if command should be failed fast
* @scmd: SCSI cmd to examine.
*/
-int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+bool scsi_noretry_cmd(struct scsi_cmnd *scmd)
{
struct request *req = scsi_cmd_to_rq(scmd);
@@ -1783,19 +1793,19 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
case DID_TIME_OUT:
goto check_type;
case DID_BUS_BUSY:
- return req->cmd_flags & REQ_FAILFAST_TRANSPORT;
+ return !!(req->cmd_flags & REQ_FAILFAST_TRANSPORT);
case DID_PARITY:
- return req->cmd_flags & REQ_FAILFAST_DEV;
+ return !!(req->cmd_flags & REQ_FAILFAST_DEV);
case DID_ERROR:
if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
- return 0;
+ return false;
fallthrough;
case DID_SOFT_ERROR:
- return req->cmd_flags & REQ_FAILFAST_DRIVER;
+ return !!(req->cmd_flags & REQ_FAILFAST_DRIVER);
}
if (!scsi_status_is_check_condition(scmd->result))
- return 0;
+ return false;
check_type:
/*
@@ -1803,9 +1813,9 @@ check_type:
* the check condition was retryable.
*/
if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req))
- return 1;
+ return true;
- return 0;
+ return false;
}
/**
@@ -1979,10 +1989,8 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
case SAM_STAT_RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
- set_host_byte(scmd, DID_NEXUS_FAILURE);
+ set_scsi_ml_byte(scmd, SCSIML_STAT_RESV_CONFLICT);
return SUCCESS; /* causes immediate i/o error */
- default:
- return FAILED;
}
return FAILED;
@@ -2002,9 +2010,11 @@ maybe_retry:
}
}
-static void eh_lock_door_done(struct request *req, blk_status_t status)
+static enum rq_end_io_ret eh_lock_door_done(struct request *req,
+ blk_status_t status)
{
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
/**
@@ -2020,27 +2030,28 @@ static void eh_lock_door_done(struct request *req, blk_status_t status)
*/
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
+ struct scsi_cmnd *scmd;
struct request *req;
- struct scsi_request *rq;
req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return;
- rq = scsi_req(req);
+ scmd = blk_mq_rq_to_pdu(req);
- rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
- rq->cmd[1] = 0;
- rq->cmd[2] = 0;
- rq->cmd[3] = 0;
- rq->cmd[4] = SCSI_REMOVAL_PREVENT;
- rq->cmd[5] = 0;
- rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+ scmd->cmnd[0] = ALLOW_MEDIUM_REMOVAL;
+ scmd->cmnd[1] = 0;
+ scmd->cmnd[2] = 0;
+ scmd->cmnd[3] = 0;
+ scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
+ scmd->cmnd[5] = 0;
+ scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+ scmd->allowed = 5;
req->rq_flags |= RQF_QUIET;
req->timeout = 10 * HZ;
- rq->retries = 5;
+ req->end_io = eh_lock_door_done;
- blk_execute_rq_nowait(NULL, req, 1, eh_lock_door_done);
+ blk_execute_rq_nowait(req, true);
}
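
This function shows the conversion template used by every passthrough site in this diff: struct scsi_request is gone, so callers reach the command with blk_mq_rq_to_pdu(), fill scmd->cmnd/cmd_len/allowed directly, and blk_execute_rq*() lost its gendisk argument. As a hedged, self-contained sketch under those assumptions (the function name is hypothetical; TEST_UNIT_READY chosen arbitrarily):

static int example_test_unit_ready(struct scsi_device *sdev)
{
	struct scsi_cmnd *scmd;
	struct request *req;
	int result;

	req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	scmd = blk_mq_rq_to_pdu(req);
	scmd->cmnd[0] = TEST_UNIT_READY;	/* rest of the CDB stays zeroed */
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->allowed = 3;			/* was scsi_req(req)->retries */
	req->timeout = 10 * HZ;

	blk_execute_rq(req, false);		/* no gendisk, bool at_head */
	result = scmd->result;
	blk_mq_free_request(req);
	return result;
}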
/**
@@ -2397,7 +2408,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg)
scmd = (struct scsi_cmnd *)(rq + 1);
scsi_init_command(dev, scmd);
- scmd->cmnd = scsi_req(rq)->cmd;
scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 400df3354cd6..2d20da55fb64 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -345,19 +345,15 @@ EXPORT_SYMBOL(scsi_cmd_allowed);
static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
struct sg_io_hdr *hdr, fmode_t mode)
{
- struct scsi_request *req = scsi_req(rq);
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
if (hdr->cmd_len < 6)
return -EMSGSIZE;
- if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
+ if (copy_from_user(scmd->cmnd, hdr->cmdp, hdr->cmd_len))
return -EFAULT;
- if (!scsi_cmd_allowed(req->cmd, mode))
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
return -EPERM;
-
- /*
- * fill in request structure
- */
- req->cmd_len = hdr->cmd_len;
+ scmd->cmd_len = hdr->cmd_len;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
@@ -373,29 +369,29 @@ static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq,
static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
struct bio *bio)
{
- struct scsi_request *req = scsi_req(rq);
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
int r, ret = 0;
/*
* fill in all the output members
*/
- hdr->status = req->result & 0xff;
- hdr->masked_status = status_byte(req->result);
+ hdr->status = scmd->result & 0xff;
+ hdr->masked_status = status_byte(scmd->result);
hdr->msg_status = COMMAND_COMPLETE;
- hdr->host_status = host_byte(req->result);
+ hdr->host_status = host_byte(scmd->result);
hdr->driver_status = 0;
if (scsi_status_is_check_condition(hdr->status))
hdr->driver_status = DRIVER_SENSE;
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
- hdr->resid = req->resid_len;
+ hdr->resid = scmd->resid_len;
hdr->sb_len_wr = 0;
- if (req->sense_len && hdr->sbp) {
- int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
+ if (scmd->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, scmd->sense_len);
- if (!copy_to_user(hdr->sbp, req->sense, len))
+ if (!copy_to_user(hdr->sbp, scmd->sense_buffer, len))
hdr->sb_len_wr = len;
else
ret = -EFAULT;
@@ -408,15 +404,14 @@ static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return ret;
}
-static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
- struct sg_io_hdr *hdr, fmode_t mode)
+static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
{
unsigned long start_time;
ssize_t ret = 0;
int writing = 0;
int at_head = 0;
struct request *rq;
- struct scsi_request *req;
+ struct scsi_cmnd *scmd;
struct bio *bio;
if (hdr->interface_id != 'S')
@@ -439,58 +434,38 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
if (hdr->flags & SG_FLAG_Q_AT_HEAD)
at_head = 1;
- ret = -ENOMEM;
rq = scsi_alloc_request(sdev->request_queue, writing ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
- req = scsi_req(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
- if (hdr->cmd_len > BLK_MAX_CDB) {
- req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
- if (!req->cmd)
- goto out_put_request;
+ if (hdr->cmd_len > sizeof(scmd->cmnd)) {
+ ret = -EINVAL;
+ goto out_put_request;
}
ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode);
if (ret < 0)
- goto out_free_cdb;
-
- ret = 0;
- if (hdr->iovec_count) {
- struct iov_iter i;
- struct iovec *iov = NULL;
-
- ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
- hdr->iovec_count, 0, &iov, &i);
- if (ret < 0)
- goto out_free_cdb;
-
- /* SG_IO howto says that the shorter of the two wins */
- iov_iter_truncate(&i, hdr->dxfer_len);
-
- ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);
- kfree(iov);
- } else if (hdr->dxfer_len)
- ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp,
- hdr->dxfer_len, GFP_KERNEL);
+ goto out_put_request;
+ ret = blk_rq_map_user_io(rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL, hdr->iovec_count && hdr->dxfer_len,
+ hdr->iovec_count, 0, rq_data_dir(rq));
if (ret)
- goto out_free_cdb;
+ goto out_put_request;
bio = rq->bio;
- req->retries = 0;
+ scmd->allowed = 0;
start_time = jiffies;
- blk_execute_rq(disk, rq, at_head);
+ blk_execute_rq(rq, at_head);
hdr->duration = jiffies_to_msecs(jiffies - start_time);
ret = scsi_complete_sghdr_rq(rq, hdr, bio);
-out_free_cdb:
- scsi_req_free_cmd(req);
out_put_request:
blk_mq_free_request(rq);
return ret;
@@ -499,19 +474,12 @@ out_put_request:
/**
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
* @q: request queue to send scsi commands down
- * @disk: gendisk to operate on (option)
* @mode: mode used to open the file through which the ioctl has been
* submitted
* @sic: userspace structure describing the command to perform
*
* Send down the scsi command described by @sic to the device below
- * the request queue @q. If @file is non-NULL it's used to perform
- * fine-grained permission checks that allow users to send down
- * non-destructive SCSI commands. If the caller has a struct gendisk
- * available it should be passed in as @disk to allow the low level
- * driver to use the information contained in it. A non-NULL @disk
- * is only allowed if the caller knows that the low level driver doesn't
- * need it (e.g. in the scsi subsystem).
+ * the request queue @q.
*
* Notes:
* - This interface is deprecated - users should use the SG_IO
@@ -530,14 +498,13 @@ out_put_request:
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
*/
-static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
- fmode_t mode, struct scsi_ioctl_command __user *sic)
+static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
+ struct scsi_ioctl_command __user *sic)
{
- enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
struct request *rq;
- struct scsi_request *req;
int err;
unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ struct scsi_cmnd *scmd;
char *buffer = NULL;
if (!sic)
@@ -568,7 +535,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
err = PTR_ERR(rq);
goto error_free_buffer;
}
- req = scsi_req(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
cmdlen = COMMAND_SIZE(opcode);
@@ -576,25 +543,25 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
* get command and data to send to device, if any
*/
err = -EFAULT;
- req->cmd_len = cmdlen;
- if (copy_from_user(req->cmd, sic->data, cmdlen))
+ scmd->cmd_len = cmdlen;
+ if (copy_from_user(scmd->cmnd, sic->data, cmdlen))
goto error;
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
goto error;
err = -EPERM;
- if (!scsi_cmd_allowed(req->cmd, mode))
+ if (!scsi_cmd_allowed(scmd->cmnd, mode))
goto error;
/* default. possible overridden later */
- req->retries = 5;
+ scmd->allowed = 5;
switch (opcode) {
case SEND_DIAGNOSTIC:
case FORMAT_UNIT:
rq->timeout = FORMAT_UNIT_TIMEOUT;
- req->retries = 1;
+ scmd->allowed = 1;
break;
case START_STOP:
rq->timeout = START_STOP_TIMEOUT;
@@ -607,7 +574,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
break;
case READ_DEFECT_DATA:
rq->timeout = READ_DEFECT_DATA_TIMEOUT;
- req->retries = 1;
+ scmd->allowed = 1;
break;
default:
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
@@ -620,14 +587,14 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
goto error;
}
- blk_execute_rq(disk, rq, 0);
+ blk_execute_rq(rq, false);
- err = req->result & 0xff; /* only 8 bit SCSI status */
+ err = scmd->result & 0xff; /* only 8 bit SCSI status */
if (err) {
- if (req->sense_len && req->sense) {
- bytes = (OMAX_SB_LEN > req->sense_len) ?
- req->sense_len : OMAX_SB_LEN;
- if (copy_to_user(sic->data, req->sense, bytes))
+ if (scmd->sense_len && scmd->sense_buffer) {
+ /* limit sense len for backward compatibility */
+ if (copy_to_user(sic->data, scmd->sense_buffer,
+ min(scmd->sense_len, 16U)))
err = -EFAULT;
}
} else {
@@ -806,8 +773,8 @@ static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
return 0;
}
-static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk,
- fmode_t mode, void __user *arg)
+static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode,
+ void __user *arg)
{
struct cdrom_generic_command cgc;
struct sg_io_hdr hdr;
@@ -847,7 +814,7 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk
hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
hdr.cmd_len = sizeof(cgc.cmd);
- err = sg_io(sdev, disk, &hdr, mode);
+ err = sg_io(sdev, &hdr, mode);
if (err == -EFAULT)
return -EFAULT;
@@ -862,8 +829,8 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk
return err;
}
-static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
- fmode_t mode, void __user *argp)
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode,
+ void __user *argp)
{
struct sg_io_hdr hdr;
int error;
@@ -871,7 +838,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
error = get_sg_io_hdr(&hdr, argp);
if (error)
return error;
- error = sg_io(sdev, disk, &hdr, mode);
+ error = sg_io(sdev, &hdr, mode);
if (error == -EFAULT)
return error;
if (put_sg_io_hdr(&hdr, argp))
@@ -882,7 +849,6 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
/**
* scsi_ioctl - Dispatch ioctl to scsi device
* @sdev: scsi device receiving ioctl
- * @disk: disk receiving the ioctl
* @mode: mode the block/char device is opened with
* @cmd: which ioctl is it
* @arg: data associated with ioctl
@@ -891,8 +857,8 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
* does not take a major/minor number as the dev field. Rather, it takes
* a pointer to a &struct scsi_device.
*/
-int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
- int cmd, void __user *arg)
+int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
+ void __user *arg)
{
struct request_queue *q = sdev->request_queue;
struct scsi_sense_hdr sense_hdr;
@@ -927,11 +893,11 @@ int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
case SG_EMULATED_HOST:
return sg_emulated_host(q, arg);
case SG_IO:
- return scsi_ioctl_sg_io(sdev, disk, mode, arg);
+ return scsi_ioctl_sg_io(sdev, mode, arg);
case SCSI_IOCTL_SEND_COMMAND:
- return sg_scsi_ioctl(q, disk, mode, arg);
+ return sg_scsi_ioctl(q, mode, arg);
case CDROM_SEND_PACKET:
- return scsi_cdrom_send_packet(sdev, disk, mode, arg);
+ return scsi_cdrom_send_packet(sdev, mode, arg);
case CDROMCLOSETRAY:
return scsi_send_start_stop(sdev, 3);
case CDROMEJECT:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 621d841d819a..8b89fab7c420 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -75,13 +75,6 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
return ret;
}
-/*
- * When to reinvoke queueing after a resource shortage. It's 3 msecs to
- * not change behaviour from the previous unplug mechanism, experimentation
- * may prove this needs changing.
- */
-#define SCSI_QUEUE_DELAY 3
-
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
@@ -118,7 +111,7 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
}
}
-static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{
struct request *rq = scsi_cmd_to_rq(cmd);
@@ -128,7 +121,12 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
} else {
WARN_ON_ONCE(true);
}
- blk_mq_requeue_request(rq, true);
+
+ if (msecs) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ } else
+ blk_mq_requeue_request(rq, true);
}
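
Passing kick_requeue_list=false to blk_mq_requeue_request() parks the request on the requeue list without running it, and blk_mq_delay_kick_requeue_list() only kicks the list once the delay expires, which is what turns this into a delayed reprep. Both call styles appear further down:

scsi_mq_requeue_cmd(cmd, 0);				/* immediate, as before */
scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);	/* back off 1s during an ALUA transition */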
/**
@@ -163,7 +161,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* Requeue this command. It will go before all other commands
* that are already in the queue. Schedule requeue work under
* lock such that the kblockd_schedule_work() call happens
- * before blk_cleanup_queue() finishes.
+ * before blk_mq_destroy_queue() finishes.
*/
cmd->result = 0;
@@ -209,11 +207,11 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, u64 flags, req_flags_t rq_flags,
- int *resid)
+ int timeout, int retries, blk_opf_t flags,
+ req_flags_t rq_flags, int *resid)
{
struct request *req;
- struct scsi_request *rq;
+ struct scsi_cmnd *scmd;
int ret;
req = scsi_alloc_request(sdev->request_queue,
@@ -223,17 +221,16 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
if (IS_ERR(req))
return PTR_ERR(req);
- rq = scsi_req(req);
-
if (bufflen) {
ret = blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, GFP_NOIO);
if (ret)
goto out;
}
- rq->cmd_len = COMMAND_SIZE(cmd[0]);
- memcpy(rq->cmd, cmd, rq->cmd_len);
- rq->retries = retries;
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(scmd->cmnd, cmd, scmd->cmd_len);
+ scmd->allowed = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
req->rq_flags |= rq_flags | RQF_QUIET;
@@ -241,7 +238,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
/*
* head injection *required* here otherwise quiesce won't work
*/
- blk_execute_rq(NULL, req, 1);
+ blk_execute_rq(req, true);
/*
* Some devices (USB mass-storage in particular) may transfer
@@ -249,16 +246,17 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
- if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
- memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
+ if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
+ memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
if (resid)
- *resid = rq->resid_len;
- if (sense && rq->sense_len)
- memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
+ *resid = scmd->resid_len;
+ if (sense && scmd->sense_len)
+ memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
if (sshdr)
- scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
- ret = rq->result;
+ scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
+ sshdr);
+ ret = scmd->result;
out:
blk_mq_free_request(req);
@@ -424,9 +422,9 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
* it and the queue. Mitigate by taking a reference to the
* queue and never touching the sdev again after we drop the
* host lock. Note: if __scsi_remove_device() invokes
- * blk_cleanup_queue() before the queue is run from this
+ * blk_mq_destroy_queue() before the queue is run from this
* function then blk_run_queue() will return immediately since
- * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+ * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING.
*/
slq = sdev->request_queue;
if (!blk_get_queue(slq))
@@ -543,8 +541,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_update_request(req, error, bytes))
return true;
+ // XXX:
if (blk_queue_add_random(q))
- add_disk_randomness(req->rq_disk);
+ add_disk_randomness(req->q->disk);
if (!blk_rq_is_passthrough(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
@@ -582,16 +581,36 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
return false;
}
+static inline u8 get_scsi_ml_byte(int result)
+{
+ return (result >> 8) & 0xff;
+}
+
/**
* scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
- * @cmd: SCSI command
* @result: scsi error code
*
- * Translate a SCSI result code into a blk_status_t value. May reset the host
- * byte of @cmd->result.
+ * Translate a SCSI result code into a blk_status_t value.
*/
-static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
+static blk_status_t scsi_result_to_blk_status(int result)
{
+ /*
+ * Check the scsi-ml byte first in case we converted a host or status
+ * byte.
+ */
+ switch (get_scsi_ml_byte(result)) {
+ case SCSIML_STAT_OK:
+ break;
+ case SCSIML_STAT_RESV_CONFLICT:
+ return BLK_STS_NEXUS;
+ case SCSIML_STAT_NOSPC:
+ return BLK_STS_NOSPC;
+ case SCSIML_STAT_MED_ERROR:
+ return BLK_STS_MEDIUM;
+ case SCSIML_STAT_TGT_FAILURE:
+ return BLK_STS_TARGET;
+ }
+
switch (host_byte(result)) {
case DID_OK:
if (scsi_status_is_good(result))
@@ -600,29 +619,49 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
case DID_TRANSPORT_FAILFAST:
case DID_TRANSPORT_MARGINAL:
return BLK_STS_TRANSPORT;
- case DID_TARGET_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_TARGET;
- case DID_NEXUS_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_NEXUS;
- case DID_ALLOC_FAILURE:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_NOSPC;
- case DID_MEDIUM_ERROR:
- set_host_byte(cmd, DID_OK);
- return BLK_STS_MEDIUM;
default:
return BLK_STS_IOERR;
}
}
-/* Helper for scsi_io_completion() when "reprep" action required. */
-static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
- struct request_queue *q)
+/**
+ * scsi_rq_err_bytes - determine the number of bytes until the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ * A request could be merge of IOs which require different failure
+ * handling. This function determines the number of bytes which
+ * can be failed from the beginning of the request without
+ * crossing into area which need to be retried further.
+ *
+ * Return:
+ * The number of bytes to fail.
+ */
+static unsigned int scsi_rq_err_bytes(const struct request *rq)
{
- /* A new command will be prepared and issued. */
- scsi_mq_requeue_cmd(cmd);
+ blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ unsigned int bytes = 0;
+ struct bio *bio;
+
+ if (!(rq->rq_flags & RQF_MIXED_MERGE))
+ return blk_rq_bytes(rq);
+
+ /*
+ * Currently the only 'mixing' which can happen is between
+ * different failfast types. We can safely fail portions
+ * which have all the failfast bits that the first one has -
+ * the ones which are at least as eager to fail as the first
+ * one.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if ((bio->bi_opf & ff) != ff)
+ break;
+ bytes += bio->bi_iter.bi_size;
+ }
+
+ /* this could lead to an infinite loop */
+ BUG_ON(blk_rq_bytes(rq) && !bytes);
+ return bytes;
}
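
The SCSIML_STAT_* values consumed by the switch at the top of scsi_result_to_blk_status() live in scsi_priv.h rather than in this file; assuming the obvious small enum (exact values illustrative), the translation is one-to-one:

enum scsi_ml_status {
	SCSIML_STAT_OK            = 0x00,
	SCSIML_STAT_RESV_CONFLICT = 0x01,	/* -> BLK_STS_NEXUS  */
	SCSIML_STAT_NOSPC         = 0x02,	/* -> BLK_STS_NOSPC  */
	SCSIML_STAT_MED_ERROR     = 0x03,	/* -> BLK_STS_MEDIUM */
	SCSIML_STAT_TGT_FAILURE   = 0x04,	/* -> BLK_STS_TARGET */
};

As for scsi_rq_err_bytes() just above: given a mixed-merged request whose first bio carries REQ_FAILFAST_TRANSPORT, the walk accumulates bytes only while each bio still has that flag set, so the fail-fast portion can be failed without touching bios that asked for retries.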
static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
@@ -642,14 +681,21 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
return false;
}
+/*
+ * When ALUA transition state is returned, reprep the cmd to
+ * use the ALUA handler's transition timeout. Delay the reprep
+ * 1 sec to avoid aggressive retries of the target in that
+ * state.
+ */
+#define ALUA_TRANSITION_REPREP_DELAY 1000
+
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
int level = 0;
- enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
- ACTION_DELAYED_RETRY} action;
+ enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP,
+ ACTION_RETRY, ACTION_DELAYED_RETRY} action;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -659,7 +705,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
if (sense_valid)
sense_current = !scsi_sense_is_deferred(&sshdr);
- blk_stat = scsi_result_to_blk_status(cmd, result);
+ blk_stat = scsi_result_to_blk_status(result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -738,8 +784,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
- blk_stat = BLK_STS_AGAIN;
- fallthrough;
+ action = ACTION_DELAYED_REPREP;
+ break;
default:
action = ACTION_FAIL;
break;
@@ -794,11 +840,14 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
+ if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
return;
fallthrough;
case ACTION_REPREP:
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
+ break;
+ case ACTION_DELAYED_REPREP:
+ scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY);
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -833,19 +882,18 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
/*
* SG_IO wants current and deferred errors
*/
- scsi_req(req)->sense_len =
- min(8 + cmd->sense_buffer[7],
- SCSI_SENSE_BUFFERSIZE);
+ cmd->sense_len = min(8 + cmd->sense_buffer[7],
+ SCSI_SENSE_BUFFERSIZE);
}
if (sense_current)
- *blk_statp = scsi_result_to_blk_status(cmd, result);
+ *blk_statp = scsi_result_to_blk_status(result);
} else if (blk_rq_bytes(req) == 0 && sense_current) {
/*
 * Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets *blk_statp explicitly for the problem case.
*/
- *blk_statp = scsi_result_to_blk_status(cmd, result);
+ *blk_statp = scsi_result_to_blk_status(result);
}
/*
* Recovered errors need reporting, but they're always treated as
@@ -893,7 +941,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
* command block will be released and the queue function will be goosed. If we
* are not done then we have to figure out what to do next:
*
- * a) We can call scsi_io_completion_reprep(). The request will be
+ * a) We can call scsi_mq_requeue_cmd(). The request will be
* unprepared and put back on the queue. Then a new command will
* be created for it. This should be used if we made forward
* progress, or if we want to switch from READ(10) to READ(6) for
@@ -909,20 +957,12 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
- struct request_queue *q = cmd->device->request_queue;
struct request *req = scsi_cmd_to_rq(cmd);
blk_status_t blk_stat = BLK_STS_OK;
if (unlikely(result)) /* a nz result may or may not be an error */
result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
- if (unlikely(blk_rq_is_passthrough(req))) {
- /*
- * scsi_result_to_blk_status may have reset the host_byte
- */
- scsi_req(req)->result = cmd->result;
- }
-
/*
* Next deal with any sectors which we were able to correctly
* handle.
@@ -953,7 +993,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* request just queue the command up again.
*/
if (likely(result == 0))
- scsi_io_completion_reprep(cmd, q);
+ scsi_mq_requeue_cmd(cmd, 0);
else
scsi_io_completion_action(cmd, result);
}
@@ -1083,24 +1123,21 @@ EXPORT_SYMBOL(scsi_alloc_sgtables);
static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- struct scsi_request *req = &cmd->req;
-
- memset(req->__cmd, 0, sizeof(req->__cmd));
- req->cmd = req->__cmd;
- req->cmd_len = BLK_MAX_CDB;
- req->sense_len = 0;
+ memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
+ cmd->cmd_len = MAX_COMMAND_SIZE;
+ cmd->sense_len = 0;
init_rcu_head(&cmd->rcu);
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
}
-struct request *scsi_alloc_request(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags)
+struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
+ blk_mq_req_flags_t flags)
{
struct request *rq;
- rq = blk_mq_alloc_request(q, op, flags);
+ rq = blk_mq_alloc_request(q, opf, flags);
if (!IS_ERR(rq))
scsi_initialize_rq(rq);
return rq;
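
scsi_alloc_request() now takes a blk_opf_t instead of a bare unsigned int op. A hedged usage sketch, modeled on the internal passthrough helpers; example_send_tur() is illustrative and error/sense handling is trimmed:

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

static int example_send_tur(struct scsi_device *sdev)
{
	struct request *rq;
	struct scsi_cmnd *scmd;

	rq = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	scmd = blk_mq_rq_to_pdu(rq);	/* the scsi_cmnd is the request pdu */
	scmd->cmd_len = 6;
	scmd->cmnd[0] = TEST_UNIT_READY;
	scmd->allowed = 3;		/* retries */
	rq->timeout = 10 * HZ;

	blk_execute_rq(rq, false);	/* synchronous, not at head */
	blk_mq_free_request(rq);
	return 0;
}
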
@@ -1122,45 +1159,16 @@ static void scsi_cleanup_rq(struct request *rq)
/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
- void *buf = cmd->sense_buffer;
- void *prot = cmd->prot_sdb;
struct request *rq = scsi_cmd_to_rq(cmd);
- unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
- unsigned long jiffies_at_alloc;
- int retries, to_clear;
- bool in_flight;
- int budget_token = cmd->budget_token;
-
- if (!blk_rq_is_passthrough(rq) && !(flags & SCMD_INITIALIZED)) {
- flags |= SCMD_INITIALIZED;
+
+ if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) {
+ cmd->flags |= SCMD_INITIALIZED;
scsi_initialize_rq(rq);
}
- jiffies_at_alloc = cmd->jiffies_at_alloc;
- retries = cmd->retries;
- in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /*
- * Zero out the cmd, except for the embedded scsi_request. Only clear
- * the driver-private command data if the LLD does not supply a
- * function to initialize that data.
- */
- to_clear = sizeof(*cmd) - sizeof(cmd->req);
- if (!dev->host->hostt->init_cmd_priv)
- to_clear += dev->host->hostt->cmd_size;
- memset((char *)cmd + sizeof(cmd->req), 0, to_clear);
-
cmd->device = dev;
- cmd->sense_buffer = buf;
- cmd->prot_sdb = prot;
- cmd->flags = flags;
INIT_LIST_HEAD(&cmd->eh_entry);
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- cmd->jiffies_at_alloc = jiffies_at_alloc;
- cmd->retries = retries;
- if (in_flight)
- __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- cmd->budget_token = budget_token;
-
}
static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
@@ -1184,10 +1192,7 @@ static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
}
- cmd->cmd_len = scsi_req(req)->cmd_len;
- cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
- cmd->allowed = scsi_req(req)->retries;
return BLK_STS_OK;
}
@@ -1235,7 +1240,7 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
* power management commands.
*/
if (req && !(req->rq_flags & RQF_PM))
- return BLK_STS_IOERR;
+ return BLK_STS_OFFLINE;
return BLK_STS_OK;
}
}
@@ -1545,10 +1550,32 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
+ bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
struct scatterlist *sg;
scsi_init_command(sdev, cmd);
+ cmd->eh_eflags = 0;
+ cmd->prot_type = 0;
+ cmd->prot_flags = 0;
+ cmd->submitter = 0;
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+ cmd->underflow = 0;
+ cmd->transfersize = 0;
+ cmd->host_scribble = NULL;
+ cmd->result = 0;
+ cmd->extra_len = 0;
+ cmd->state = 0;
+ if (in_flight)
+ __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
+
+ /*
+ * Only clear the driver-private command data if the LLD does not supply
+ * a function to initialize that data.
+ */
+ if (!shost->hostt->init_cmd_priv)
+ memset(cmd + 1, 0, shost->hostt->cmd_size);
+
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
cmd->sc_data_direction = rq_dma_dir(req);
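
scsi_prepare_cmd() now clears the reused fields one by one instead of memset()ing everything past the old embedded scsi_request, and the only state bit that survives is SCMD_STATE_INFLIGHT. The save-and-restore-one-bit pattern in isolation, as a standalone sketch with an illustrative bit number:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_STATE_INFLIGHT 0	/* illustrative bit number */

static void reset_state_keep_inflight(unsigned long *state)
{
	bool in_flight = test_bit(EXAMPLE_STATE_INFLIGHT, state);

	*state = 0;
	if (in_flight)
		__set_bit(EXAMPLE_STATE_INFLIGHT, state);
}
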
@@ -1579,13 +1606,16 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
return ret;
}
- cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
- memset(cmd->cmnd, 0, BLK_MAX_CDB);
+ /* Usually overridden by the ULP */
+ cmd->allowed = 0;
+ memset(cmd->cmnd, 0, sizeof(cmd->cmnd));
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
-void scsi_done(struct scsi_cmnd *cmd)
+static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly)
{
+ struct request *req = scsi_cmd_to_rq(cmd);
+
switch (cmd->submitter) {
case SUBMITTED_BY_BLOCK_LAYER:
break;
@@ -1600,10 +1630,25 @@ void scsi_done(struct scsi_cmnd *cmd)
if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
return;
trace_scsi_dispatch_cmd_done(cmd);
- blk_mq_complete_request(scsi_cmd_to_rq(cmd));
+
+ if (complete_directly)
+ blk_mq_complete_request_direct(req, scsi_complete);
+ else
+ blk_mq_complete_request(req);
+}
+
+void scsi_done(struct scsi_cmnd *cmd)
+{
+ scsi_done_internal(cmd, false);
}
EXPORT_SYMBOL(scsi_done);
+void scsi_done_direct(struct scsi_cmnd *cmd)
+{
+ scsi_done_internal(cmd, true);
+}
+EXPORT_SYMBOL(scsi_done_direct);
+
static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{
struct scsi_device *sdev = q->queuedata;
@@ -1611,6 +1656,13 @@ static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
sbitmap_put(&sdev->budget_map, budget_token);
}
+/*
+ * When to reinvoke queueing after a resource shortage. It is 3 msecs so
+ * as not to change behaviour from the previous unplug mechanism;
+ * experimentation may prove this needs changing.
+ */
+#define SCSI_QUEUE_DELAY 3
+
static int scsi_mq_get_budget(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
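
When no budget token can be obtained, the queue is re-run after SCSI_QUEUE_DELAY msecs rather than being left idle. Roughly, the re-run looks like the sketch below (not the exact scsi_lib.c call site):

#include <linux/blk-mq.h>

static void example_retry_after_shortage(struct request_queue *q)
{
	/* Re-run every hw queue once 3 msecs have passed. */
	blk_mq_delay_run_hw_queues(q, 3 /* SCSI_QUEUE_DELAY */);
}
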
@@ -1731,15 +1783,15 @@ out_put_budget:
ret = BLK_STS_DEV_RESOURCE;
break;
case BLK_STS_AGAIN:
- scsi_req(req)->result = DID_BUS_BUSY << 16;
+ cmd->result = DID_BUS_BUSY << 16;
if (req->rq_flags & RQF_DONTPREP)
scsi_mq_uninit_cmd(cmd);
break;
default:
if (unlikely(!scsi_device_online(sdev)))
- scsi_req(req)->result = DID_NO_CONNECT << 16;
+ cmd->result = DID_NO_CONNECT << 16;
else
- scsi_req(req)->result = DID_ERROR << 16;
+ cmd->result = DID_ERROR << 16;
/*
* Make sure to release all allocated resources when
* we hit an error, as we will never see this command
@@ -1753,14 +1805,6 @@ out_put_budget:
return ret;
}
-static enum blk_eh_timer_return scsi_timeout(struct request *req,
- bool reserved)
-{
- if (reserved)
- return BLK_EH_RESET_TIMER;
- return scsi_times_out(req);
-}
-
static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
@@ -1773,7 +1817,6 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
- cmd->req.sense = cmd->sense_buffer;
if (scsi_host_get_prot(shost)) {
sg = (void *)cmd + sizeof(struct scsi_cmnd) +
@@ -1821,13 +1864,13 @@ static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
return 0;
}
-static int scsi_map_queues(struct blk_mq_tag_set *set)
+static void scsi_map_queues(struct blk_mq_tag_set *set)
{
struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
if (shost->hostt->map_queues)
return shost->hostt->map_queues(shost);
- return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
@@ -1848,10 +1891,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
- if (dev->dma_mask) {
- shost->max_sectors = min_t(unsigned int, shost->max_sectors,
- dma_max_mapping_size(dev) >> SECTOR_SHIFT);
- }
blk_queue_max_hw_sectors(q, shost->max_sectors);
blk_queue_segment_boundary(q, shost->dma_boundary);
dma_set_seg_boundary(dev, shost->dma_boundary);
@@ -1941,7 +1980,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
tag_set->nr_maps = shost->nr_maps ? : 1;
tag_set->queue_depth = shost->can_queue;
tag_set->cmd_size = cmd_size;
- tag_set->numa_node = NUMA_NO_NODE;
+ tag_set->numa_node = dev_to_node(shost->dma_dev);
tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
tag_set->flags |=
BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
@@ -1952,9 +1991,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
return blk_mq_alloc_tag_set(tag_set);
}
-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+void scsi_mq_free_tags(struct kref *kref)
{
+ struct Scsi_Host *shost = container_of(kref, typeof(*shost),
+ tagset_refcnt);
+
blk_mq_free_tag_set(&shost->tag_set);
+ complete(&shost->tagset_freed);
}
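
The tag set is now reference-counted: scsi_mq_free_tags() runs as the kref release callback and signals tagset_freed, so a waiter can block until the final put. The generic shape of that pattern, as a self-contained sketch:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kref.h>

struct tags_owner {
	struct kref refcnt;		/* kref_init() at setup time */
	struct completion freed;	/* init_completion() at setup time */
};

static void tags_release(struct kref *kref)
{
	struct tags_owner *o = container_of(kref, struct tags_owner, refcnt);

	/* free the real resource here, then wake the waiter */
	complete(&o->freed);
}

static void tags_teardown(struct tags_owner *o)
{
	kref_put(&o->refcnt, tags_release);	/* drop our reference */
	wait_for_completion(&o->freed);		/* wait for the last user */
}
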
/**
@@ -2026,7 +2069,6 @@ void scsi_exit_queue(void)
* @sdev: SCSI device to be queried
* @pf: Page format bit (1 == standard, 0 == vendor specific)
* @sp: Save page bit (0 == don't save, 1 == save)
- * @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes)
* @len: length of request buffer.
* @timeout: command timeout
@@ -2039,10 +2081,9 @@ void scsi_exit_queue(void)
* status on error
*
*/
-int
-scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
- unsigned char *buffer, int len, int timeout, int retries,
- struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
+int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
+ unsigned char *buffer, int len, int timeout, int retries,
+ struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
unsigned char cmd[10];
unsigned char *real_buffer;
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index ed9572252a42..b02af340c2d3 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -30,7 +30,9 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
- return rq->rq_disk ? rq->rq_disk->disk_name : NULL;
+ if (!rq->q || !rq->q->disk)
+ return NULL;
+ return rq->q->disk->disk_name;
}
static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
@@ -85,7 +87,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd,
char *logbuf;
size_t off = 0, logbuf_len;
- if (!scmd || !scmd->cmnd)
+ if (!scmd)
return;
logbuf = scsi_log_reserve_buffer(&logbuf_len);
@@ -181,9 +183,6 @@ void scsi_print_command(struct scsi_cmnd *cmd)
char *logbuf;
size_t off, logbuf_len;
- if (!cmd->cmnd)
- return;
-
logbuf = scsi_log_reserve_buffer(&logbuf_len);
if (!logbuf)
return;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index b5a858c29488..d581613d87c7 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -8,7 +8,6 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/async.h>
#include <linux/blk-pm.h>
#include <scsi/scsi.h>
@@ -181,7 +180,7 @@ static int sdev_runtime_resume(struct device *dev)
blk_pre_runtime_resume(sdev->request_queue);
if (pm && pm->runtime_resume)
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
+ blk_post_runtime_resume(sdev->request_queue);
return err;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a278fc8948f4..c52de9a973e4 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -3,7 +3,6 @@
#define _SCSI_PRIV_H
#include <linux/device.h>
-#include <linux/async.h>
#include <scsi/scsi_device.h>
#include <linux/sbitmap.h>
@@ -20,6 +19,17 @@ struct scsi_nl_hdr;
#define SCSI_CMD_RETRIES_NO_LIMIT -1
/*
+ * Error codes used by scsi-ml internally. These must not be used by drivers.
+ */
+enum scsi_ml_status {
+ SCSIML_STAT_OK = 0x00,
+ SCSIML_STAT_RESV_CONFLICT = 0x01, /* Reservation conflict */
+ SCSIML_STAT_NOSPC = 0x02, /* Space allocation on the dev failed */
+ SCSIML_STAT_MED_ERROR = 0x03, /* Medium error */
+ SCSIML_STAT_TGT_FAILURE = 0x04, /* Permanent target failure */
+};
+
+/*
* Scsi Error Handler Flags
*/
#define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */
@@ -73,7 +83,7 @@ extern void scsi_exit_devinfo(void);
/* scsi_error.c */
extern void scmd_eh_abort_handler(struct work_struct *work);
-extern enum blk_eh_timer_return scsi_times_out(struct request *req);
+extern enum blk_eh_timer_return scsi_timeout(struct request *req);
extern int scsi_error_handler(void *host);
extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
extern void scsi_eh_wakeup(struct Scsi_Host *shost);
@@ -83,7 +93,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
struct list_head *done_q);
int scsi_eh_get_sense(struct list_head *work_q,
struct list_head *done_q);
-int scsi_noretry_cmd(struct scsi_cmnd *scmd);
+bool scsi_noretry_cmd(struct scsi_cmnd *scmd);
void scsi_eh_done(struct scsi_cmnd *scmd);
/* scsi_lib.c */
@@ -95,7 +105,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
+extern void scsi_mq_free_tags(struct kref *kref);
extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
@@ -144,7 +154,7 @@ extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
extern struct bus_type scsi_bus_type;
-extern const struct attribute_group scsi_shost_attr_group;
+extern const struct attribute_group *scsi_shost_groups[];
/* scsi_netlink.c */
#ifdef CONFIG_SCSI_NETLINK
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index d6982d355739..95aee1ad1383 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -49,7 +49,7 @@ static DEFINE_MUTEX(global_host_template_mutex);
static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct Scsi_Host *shost = PDE_DATA(file_inode(file));
+ struct Scsi_Host *shost = pde_data(file_inode(file));
ssize_t ret = -ENOMEM;
char *page;
@@ -79,7 +79,7 @@ static int proc_scsi_show(struct seq_file *m, void *v)
static int proc_scsi_host_open(struct inode *inode, struct file *file)
{
- return single_open_size(file, proc_scsi_show, PDE_DATA(inode),
+ return single_open_size(file, proc_scsi_show, pde_data(inode),
4 * PAGE_SIZE);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 23e1c0acdeae..5d27f5196de6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
-char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
+static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
S_IRUGO|S_IWUSR);
@@ -122,7 +122,7 @@ struct async_scan_data {
struct completion prev_finished;
};
-/**
+/*
* scsi_enable_async_suspend - Enable async suspend and resume
*/
void scsi_enable_async_suspend(struct device *dev)
@@ -214,6 +214,53 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
SCSI_TIMEOUT, 3, NULL);
}
+static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ unsigned int depth)
+{
+ int new_shift = sbitmap_calculate_shift(depth);
+ bool need_alloc = !sdev->budget_map.map;
+ bool need_free = false;
+ int ret;
+ struct sbitmap sb_backup;
+
+ depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+
+ /*
+ * Reallocate if a new shift is calculated, which happens when a new
+ * default queue depth is set up after calling ->slave_configure.
+ */
+ if (!need_alloc && new_shift != sdev->budget_map.shift)
+ need_alloc = need_free = true;
+
+ if (!need_alloc)
+ return 0;
+
+ /*
+ * The request queue has to be frozen while the budget map is
+ * reallocated; the disk isn't added yet here, so freezing is fast.
+ */
+ if (need_free) {
+ blk_mq_freeze_queue(sdev->request_queue);
+ sb_backup = sdev->budget_map;
+ }
+ ret = sbitmap_init_node(&sdev->budget_map,
+ scsi_device_max_queue_depth(sdev),
+ new_shift, GFP_KERNEL,
+ sdev->request_queue->node, false, true);
+ if (!ret)
+ sbitmap_resize(&sdev->budget_map, depth);
+
+ if (need_free) {
+ if (ret)
+ sdev->budget_map = sb_backup;
+ else
+ sbitmap_free(&sb_backup);
+ ret = 0;
+ blk_mq_unfreeze_queue(sdev->request_queue);
+ }
+ return ret;
+}
+
/**
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
@@ -293,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
+ kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);
@@ -306,11 +354,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
* default device queue depth to figure out sbitmap shift
* since we use this queue depth most of the time.
*/
- if (sbitmap_init_node(&sdev->budget_map,
- scsi_device_max_queue_depth(sdev),
- sbitmap_calculate_shift(depth),
- GFP_KERNEL, sdev->request_queue->node,
- false, true)) {
+ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
put_device(&starget->dev);
kfree(sdev);
goto out;
@@ -690,7 +734,17 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (pass == 1) {
if (BLIST_INQUIRY_36 & *bflags)
next_inquiry_len = 36;
- else if (sdev->inquiry_len)
+ /*
+ * LLD specified a maximum sdev->inquiry_len
+ * but the device claims it has more data. Capping
+ * the length only makes sense for legacy
+ * devices. If a device supports SPC-4 (2014)
+ * or newer, assume that it is safe to ask for
+ * as much as the device says it supports.
+ */
+ else if (sdev->inquiry_len &&
+ response_len > sdev->inquiry_len &&
+ (inq_result[2] & 0x7) < 6) /* SPC-4 */
next_inquiry_len = sdev->inquiry_len;
else
next_inquiry_len = response_len;
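
The new test only applies the LLD's inquiry_len cap to pre-SPC-4 devices. Byte 2 of the standard INQUIRY data carries the claimed version (0x05 = SPC-3, 0x06 = SPC-4, 0x07 = SPC-5), so the check reduces to the sketch below:

#include <linux/types.h>

/* True when the device predates SPC-4 and should keep the legacy cap. */
static bool needs_legacy_inquiry_cap(u8 inq_byte2)
{
	return (inq_byte2 & 0x7) < 6;	/* e.g. 0x05 (SPC-3) -> true */
}
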
@@ -1017,6 +1071,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
return SCSI_SCAN_NO_RESPONSE;
}
+
+ /*
+ * The queue_depth is often changed in ->slave_configure.
+ * Set up the budget map again, since the memory consumption
+ * of the map depends on the actual queue depth.
+ */
+ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
}
if (sdev->scsi_level >= SCSI_3)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d4edce930a4a..cac7c902cf70 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -424,10 +424,15 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
NULL
};
-const struct attribute_group scsi_shost_attr_group = {
+static const struct attribute_group scsi_shost_attr_group = {
.attrs = scsi_sysfs_shost_attrs,
};
+const struct attribute_group *scsi_shost_groups[] = {
+ &scsi_shost_attr_group,
+ NULL
+};
+
static void scsi_device_cls_release(struct device *class_dev)
{
struct scsi_device *sdev;
@@ -443,6 +448,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
+ struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
struct module *mod;
@@ -485,6 +491,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb0 = rcu_replace_pointer(sdev->vpd_pgb0, vpd_pgb0,
+ lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb1 = rcu_replace_pointer(sdev->vpd_pgb1, vpd_pgb1,
+ lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_pg0)
@@ -495,6 +507,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree_rcu(vpd_pg80, rcu);
if (vpd_pg89)
kfree_rcu(vpd_pg89, rcu);
+ if (vpd_pgb0)
+ kfree_rcu(vpd_pgb0, rcu);
+ if (vpd_pgb1)
+ kfree_rcu(vpd_pgb1, rcu);
+ if (vpd_pgb2)
+ kfree_rcu(vpd_pgb2, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -555,7 +573,6 @@ struct bus_type scsi_bus_type = {
.pm = &scsi_bus_pm_ops,
#endif
};
-EXPORT_SYMBOL_GPL(scsi_bus_type);
int scsi_sysfs_register(void)
{
@@ -811,6 +828,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
}
mutex_lock(&sdev->state_mutex);
+ switch (sdev->sdev_state) {
+ case SDEV_RUNNING:
+ case SDEV_OFFLINE:
+ break;
+ default:
+ mutex_unlock(&sdev->state_mutex);
+ return -EINVAL;
+ }
if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
ret = 0;
} else {
@@ -908,6 +933,9 @@ static struct bin_attribute dev_attr_vpd_##_page = { \
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
sdev_vpd_pg_attr(pg89);
+sdev_vpd_pg_attr(pgb0);
+sdev_vpd_pg_attr(pgb1);
+sdev_vpd_pg_attr(pgb2);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
@@ -956,6 +984,7 @@ static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat(ioerr_cnt);
+show_sdev_iostat(iotmo_cnt);
static ssize_t
sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1223,14 +1252,6 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
!sdev->host->hostt->change_queue_depth)
return 0;
-#ifdef CONFIG_SCSI_DH
- if (attr == &dev_attr_access_state.attr &&
- !sdev->handler)
- return 0;
- if (attr == &dev_attr_preferred_path.attr &&
- !sdev->handler)
- return 0;
-#endif
return attr->mode;
}
@@ -1253,6 +1274,15 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89)
return 0;
+ if (attr == &dev_attr_vpd_pgb0 && !sdev->vpd_pgb0)
+ return 0;
+
+ if (attr == &dev_attr_vpd_pgb1 && !sdev->vpd_pgb1)
+ return 0;
+
+ if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2)
+ return 0;
+
return S_IRUGO;
}
@@ -1274,6 +1304,7 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_iorequest_cnt.attr,
&dev_attr_iodone_cnt.attr,
&dev_attr_ioerr_cnt.attr,
+ &dev_attr_iotmo_cnt.attr,
&dev_attr_modalias.attr,
&dev_attr_queue_depth.attr,
&dev_attr_queue_type.attr,
@@ -1299,6 +1330,9 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pg83,
&dev_attr_vpd_pg80,
&dev_attr_vpd_pg89,
+ &dev_attr_vpd_pgb0,
+ &dev_attr_vpd_pgb1,
+ &dev_attr_vpd_pgb2,
&dev_attr_inquiry,
NULL
};
@@ -1387,10 +1421,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
sdev->bsg_dev = scsi_bsg_register_queue(sdev);
if (IS_ERR(sdev->bsg_dev)) {
error = PTR_ERR(sdev->bsg_dev);
sdev_printk(KERN_INFO, sdev,
"Failed to register bsg queue, errno=%d\n",
@@ -1455,7 +1485,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
scsi_device_set_state(sdev, SDEV_DEL);
mutex_unlock(&sdev->state_mutex);
- blk_cleanup_queue(sdev->request_queue);
+ blk_mq_destroy_queue(sdev->request_queue);
+ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->slave_destroy)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 60e406bcf42a..8934160c4a33 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -34,7 +34,7 @@ static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
static void fc_bsg_remove(struct request_queue *);
static void fc_bsg_goose_queue(struct fc_rport *);
-static void fc_li_stats_update(struct fc_fn_li_desc *li_desc,
+static void fc_li_stats_update(u16 event_type,
struct fc_fpin_stats *stats);
static void fc_delivery_stats_update(u32 reason_code,
struct fc_fpin_stats *stats);
@@ -543,7 +543,7 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
struct nlmsghdr *nlh;
struct fc_nl_event *event;
const char *name;
- u32 len;
+ size_t len, padding;
int err;
if (!data_buf || data_len < 4)
@@ -554,7 +554,7 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
goto send_fail;
}
- len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
+ len = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len);
skb = nlmsg_new(len, GFP_KERNEL);
if (!skb) {
@@ -578,7 +578,9 @@ fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
event->event_num = event_number;
event->event_code = event_code;
if (data_len)
- memcpy(&event->event_data, data_buf, data_len);
+ memcpy(event->event_data_flex, data_buf, data_len);
+ padding = len - offsetof(typeof(*event), event_data_flex) - data_len;
+ memset(event->event_data_flex + data_len, 0, padding);
nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
GFP_KERNEL);
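
With event_data converted to a flexible array member, the message length is the fixed header plus the payload, rounded up to netlink alignment, and the alignment tail has to be zeroed so no uninitialized kernel memory reaches user space. A sketch of the arithmetic with an illustrative struct, using the generic NLMSG_ALIGN in place of FC_NL_MSGALIGN:

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/netlink.h>

struct example_event {
	u32 header;		/* fixed part */
	u8 payload[];		/* flexible payload */
};

static size_t example_fill(struct example_event *ev, const void *buf,
			   size_t data_len)
{
	size_t len = NLMSG_ALIGN(sizeof(*ev) + data_len);
	size_t padding = len - offsetof(struct example_event, payload) - data_len;

	memcpy(ev->payload, buf, data_len);
	memset(ev->payload + data_len, 0, padding);	/* zero the tail */
	return len;
}
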
@@ -670,42 +672,34 @@ fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn)
EXPORT_SYMBOL(fc_find_rport_by_wwpn);
static void
-fc_li_stats_update(struct fc_fn_li_desc *li_desc,
+fc_li_stats_update(u16 event_type,
struct fc_fpin_stats *stats)
{
- stats->li += be32_to_cpu(li_desc->event_count);
- switch (be16_to_cpu(li_desc->event_type)) {
+ stats->li++;
+ switch (event_type) {
case FPIN_LI_UNKNOWN:
- stats->li_failure_unknown +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_failure_unknown++;
break;
case FPIN_LI_LINK_FAILURE:
- stats->li_link_failure_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_link_failure_count++;
break;
case FPIN_LI_LOSS_OF_SYNC:
- stats->li_loss_of_sync_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_loss_of_sync_count++;
break;
case FPIN_LI_LOSS_OF_SIG:
- stats->li_loss_of_signals_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_loss_of_signals_count++;
break;
case FPIN_LI_PRIM_SEQ_ERR:
- stats->li_prim_seq_err_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_prim_seq_err_count++;
break;
case FPIN_LI_INVALID_TX_WD:
- stats->li_invalid_tx_word_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_invalid_tx_word_count++;
break;
case FPIN_LI_INVALID_CRC:
- stats->li_invalid_crc_count +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_invalid_crc_count++;
break;
case FPIN_LI_DEVICE_SPEC:
- stats->li_device_specific +=
- be32_to_cpu(li_desc->event_count);
+ stats->li_device_specific++;
break;
}
}
@@ -767,6 +761,7 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
struct fc_rport *attach_rport = NULL;
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
+ u16 event_type = be16_to_cpu(li_desc->event_type);
u64 wwpn;
rport = fc_find_rport_by_wwpn(shost,
@@ -775,7 +770,7 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
(rport->roles & FC_PORT_ROLE_FCP_TARGET ||
rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
attach_rport = rport;
- fc_li_stats_update(li_desc, &attach_rport->fpin_stats);
+ fc_li_stats_update(event_type, &attach_rport->fpin_stats);
}
if (be32_to_cpu(li_desc->pname_count) > 0) {
@@ -789,14 +784,14 @@ fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
if (rport == attach_rport)
continue;
- fc_li_stats_update(li_desc,
+ fc_li_stats_update(event_type,
&rport->fpin_stats);
}
}
}
if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
- fc_li_stats_update(li_desc, &fc_host->fpin_stats);
+ fc_li_stats_update(event_type, &fc_host->fpin_stats);
}
/*
@@ -1177,7 +1172,7 @@ static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
return 0;
}
-fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
+fc_rport_show_function(dev_loss_tmo, "%u\n", 20, )
static ssize_t
store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 554b6f784223..cd3db9684e52 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,8 +86,10 @@ struct iscsi_internal {
struct transport_container session_cont;
};
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
-static struct workqueue_struct *iscsi_eh_timer_workq;
static struct workqueue_struct *iscsi_conn_cleanup_workq;
@@ -169,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
static void iscsi_endpoint_release(struct device *dev)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, ep->id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
kfree(ep);
}
@@ -181,7 +188,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+ return sysfs_emit(buf, "%d\n", ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -194,48 +201,37 @@ static struct attribute_group iscsi_endpoint_group = {
.attrs = iscsi_endpoint_attrs,
};
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- const uint64_t *epid = data;
-
- return *epid == ep->id;
-}
-
struct iscsi_endpoint *
iscsi_create_endpoint(int dd_size)
{
- struct device *dev;
struct iscsi_endpoint *ep;
- uint64_t id;
- int err;
-
- for (id = 1; id < ISCSI_MAX_EPID; id++) {
- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
- iscsi_match_epid);
- if (!dev)
- break;
- else
- put_device(dev);
- }
- if (id == ISCSI_MAX_EPID) {
- printk(KERN_ERR "Too many connections. Max supported %u\n",
- ISCSI_MAX_EPID - 1);
- return NULL;
- }
+ int err, id;
ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
if (!ep)
return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+
+ /*
+ * First endpoint id should be 1 to comply with user space
+ * applications (iscsid).
+ */
+ id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
+ if (id < 0) {
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+ id);
+ goto free_ep;
+ }
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
- dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+ dev_set_name(&ep->dev, "ep-%d", id);
err = device_register(&ep->dev);
if (err)
- goto free_ep;
+ goto free_id;
err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
if (err)
@@ -249,6 +245,10 @@ unregister_dev:
device_unregister(&ep->dev);
return NULL;
+free_id:
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
free_ep:
kfree(ep);
return NULL;
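
Endpoint IDs now come from an IDR guarded by a mutex instead of a linear class_find_device() scan, which removes the artificial ISCSI_MAX_EPID cap. The pattern in isolation (the real code above uses GFP_NOIO and starts at 1 for iscsid's benefit):

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_IDR(example_idr);
static DEFINE_MUTEX(example_idr_mutex);

static int example_alloc_id(void *object)
{
	int id;

	mutex_lock(&example_idr_mutex);
	id = idr_alloc(&example_idr, object, 1, -1, GFP_KERNEL);
	mutex_unlock(&example_idr_mutex);
	return id;			/* negative errno on failure */
}

static void *example_lookup(int id)
{
	void *object;

	mutex_lock(&example_idr_mutex);
	object = idr_find(&example_idr, id);
	/* a real lookup grabs a reference here, before unlocking */
	mutex_unlock(&example_idr_mutex);
	return object;
}

static void example_free_id(int id)
{
	mutex_lock(&example_idr_mutex);
	idr_remove(&example_idr, id);
	mutex_unlock(&example_idr_mutex);
}
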
@@ -276,14 +276,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
*/
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
- struct device *dev;
+ struct iscsi_endpoint *ep;
- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
- iscsi_match_epid);
- if (!dev)
- return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ ep = idr_find(&iscsi_ep_idr, handle);
+ if (!ep)
+ goto unlock;
- return iscsi_dev_to_endpoint(dev);
+ get_device(&ep->dev);
+unlock:
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
@@ -1557,7 +1560,6 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
struct iscsi_cls_host *ihost = shost->shost_data;
memset(ihost, 0, sizeof(*ihost));
- atomic_set(&ihost->nr_scans, 0);
mutex_init(&ihost->mutex);
iscsi_bsg_host_add(shost, ihost);
@@ -1744,25 +1746,6 @@ void iscsi_host_for_each_session(struct Scsi_Host *shost,
}
EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
-/**
- * iscsi_scan_finished - helper to report when running scans are done
- * @shost: scsi host
- * @time: scan run time
- *
- * This function can be used by drives like qla4xxx to report to the scsi
- * layer when the scans it kicked off at module load time are done.
- */
-int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
-{
- struct iscsi_cls_host *ihost = shost->shost_data;
- /*
- * qla4xxx will have kicked off some session unblocks before calling
- * scsi_scan_host, so just wait for them to complete.
- */
- return !atomic_read(&ihost->nr_scans);
-}
-EXPORT_SYMBOL_GPL(iscsi_scan_finished);
-
struct iscsi_scan_data {
unsigned int channel;
unsigned int id;
@@ -1831,8 +1814,6 @@ static void iscsi_scan_session(struct work_struct *work)
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session, scan_work);
- struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_cls_host *ihost = shost->shost_data;
struct iscsi_scan_data scan_data;
scan_data.channel = 0;
@@ -1841,7 +1822,6 @@ static void iscsi_scan_session(struct work_struct *work)
scan_data.rescan = SCSI_SCAN_RESCAN;
iscsi_user_scan_session(&session->dev, &scan_data);
- atomic_dec(&ihost->nr_scans);
}
/**
@@ -1912,30 +1892,16 @@ static void __iscsi_unblock_session(struct work_struct *work)
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session,
unblock_work);
- struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_cls_host *ihost = shost->shost_data;
unsigned long flags;
ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n");
- /*
- * The recovery and unblock work get run from the same workqueue,
- * so try to cancel it if it was going to run after this unblock.
- */
- cancel_delayed_work(&session->recovery_work);
+
+ cancel_delayed_work_sync(&session->recovery_work);
spin_lock_irqsave(&session->lock, flags);
session->state = ISCSI_SESSION_LOGGED_IN;
spin_unlock_irqrestore(&session->lock, flags);
/* start IO */
scsi_target_unblock(&session->dev, SDEV_RUNNING);
- /*
- * Only do kernel scanning if the driver is properly hooked into
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
- if (shost->hostt->scan_finished) {
- if (scsi_queue_work(shost, &session->scan_work))
- atomic_inc(&ihost->nr_scans);
- }
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n");
}
@@ -1947,9 +1913,10 @@ static void __iscsi_unblock_session(struct work_struct *work)
*/
void iscsi_unblock_session(struct iscsi_cls_session *session)
{
- flush_work(&session->block_work);
+ if (!cancel_work_sync(&session->block_work))
+ cancel_delayed_work_sync(&session->recovery_work);
- queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+ queue_work(session->workq, &session->unblock_work);
/*
* Blocking the session can be done from any context so we only
* queue the block work. Make sure the unblock work has completed
@@ -1973,14 +1940,14 @@ static void __iscsi_block_session(struct work_struct *work)
scsi_target_block(&session->dev);
ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
if (session->recovery_tmo >= 0)
- queue_delayed_work(iscsi_eh_timer_workq,
+ queue_delayed_work(session->workq,
&session->recovery_work,
session->recovery_tmo * HZ);
}
void iscsi_block_session(struct iscsi_cls_session *session)
{
- queue_work(iscsi_eh_timer_workq, &session->block_work);
+ queue_work(session->workq, &session->block_work);
}
EXPORT_SYMBOL_GPL(iscsi_block_session);
@@ -2013,7 +1980,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
scsi_remove_target(&session->dev);
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, target_id);
+ ida_free(&iscsi_sess_ida, target_id);
unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
@@ -2068,19 +2035,27 @@ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
{
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
unsigned long flags;
int id = 0;
int err;
session->sid = atomic_add_return(1, &iscsi_session_nr);
+ session->workq = alloc_workqueue("iscsi_ctrl_%d:%d",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
+ shost->host_no, session->sid);
+ if (!session->workq)
+ return -ENOMEM;
+
if (target_id == ISCSI_MAX_TARGET) {
- id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&iscsi_sess_ida, GFP_KERNEL);
if (id < 0) {
iscsi_cls_session_printk(KERN_ERR, session,
"Failure in Target ID Allocation\n");
- return id;
+ err = id;
+ goto destroy_wq;
}
session->target_id = (unsigned int)id;
session->ida_used = true;
@@ -2113,8 +2088,9 @@ release_dev:
device_del(&session->dev);
release_ida:
if (session->ida_used)
- ida_simple_remove(&iscsi_sess_ida, session->target_id);
-
+ ida_free(&iscsi_sess_ida, session->target_id);
+destroy_wq:
+ destroy_workqueue(session->workq);
return err;
}
EXPORT_SYMBOL_GPL(iscsi_add_session);
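
Each session now owns a dedicated workqueue, created before the target ID, so the error path unwinds in strict reverse order: release the ID first, then destroy the workqueue. A condensed sketch of that ordering (names illustrative):

#include <linux/idr.h>
#include <linux/workqueue.h>

static DEFINE_IDA(example_ida);

static int example_session_setup(int host_no, int sid)
{
	struct workqueue_struct *wq;
	int id, err;

	wq = alloc_workqueue("iscsi_ctrl_%d:%d",
			     WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
			     host_no, sid);
	if (!wq)
		return -ENOMEM;

	id = ida_alloc(&example_ida, GFP_KERNEL);
	if (id < 0) {
		err = id;
		goto destroy_wq;	/* undo in reverse order */
	}
	return 0;

destroy_wq:
	destroy_workqueue(wq);
	return err;
}
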
@@ -2165,7 +2141,9 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
{
if (!iscsi_is_conn_dev(dev))
return 0;
- return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+
+ iscsi_remove_conn(iscsi_dev_to_conn(dev));
+ return 0;
}
void iscsi_remove_session(struct iscsi_cls_session *session)
@@ -2180,9 +2158,9 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
list_del(&session->sess_list);
spin_unlock_irqrestore(&sesslock, flags);
- flush_work(&session->block_work);
- flush_work(&session->unblock_work);
- cancel_delayed_work_sync(&session->recovery_work);
+ if (!cancel_work_sync(&session->block_work))
+ cancel_delayed_work_sync(&session->recovery_work);
+ cancel_work_sync(&session->unblock_work);
/*
* If we are blocked let commands flow again. The lld or iscsi
* layer should set up the queuecommand to fail commands.
@@ -2194,7 +2172,10 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
spin_unlock_irqrestore(&session->lock, flags);
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
- /* flush running scans then delete devices */
+ /*
+ * qla4xxx can perform its own scans when it runs in kernel-only
+ * mode. Make sure to flush those scans.
+ */
flush_work(&session->scan_work);
/* flush running unbind operations */
flush_work(&session->unbind_work);
@@ -2210,6 +2191,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
transport_unregister_device(&session->dev);
+ destroy_workqueue(session->workq);
+
ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
device_del(&session->dev);
}
@@ -2221,10 +2204,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
switch (flag) {
case STOP_CONN_RECOVER:
- conn->state = ISCSI_CONN_FAILED;
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
break;
case STOP_CONN_TERM:
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
break;
default:
iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2236,18 +2219,63 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
}
-static int iscsi_if_stop_conn(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
{
- int flag = ev->u.stop_conn.flag;
- struct iscsi_cls_conn *conn;
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
- conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid);
- if (!conn)
- return -EINVAL;
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+ struct iscsi_endpoint *ep,
+ bool is_active)
+{
+ /* Check if this was a conn error and the kernel took ownership */
+ spin_lock_irq(&conn->lock);
+ if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_ep_disconnect(conn, is_active);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+ mutex_unlock(&conn->ep_mutex);
+
+ flush_work(&conn->cleanup_work);
+ /*
+ * Userspace is now done with the EP so we can release the ref
+ * iscsi_cleanup_conn_work_fn took.
+ */
+ iscsi_put_endpoint(ep);
+ mutex_lock(&conn->ep_mutex);
+ }
+}
+
+static int iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
+{
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n");
/*
+ * For offload, iscsid may not know about the ep, e.g. when iscsid is
+ * restarted, or for kernel-based session shutdown iscsid is not even
+ * up. For these cases, we do the disconnect now.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
* If this is a termination we have to call stop_conn with that flag
* so the correct states get set. If we haven't run the work yet, try
* to avoid the extra run.
@@ -2259,9 +2287,12 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
/*
* Figure out if it was the kernel or userspace initiating this.
*/
+ spin_lock_irq(&conn->lock);
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
iscsi_stop_conn(conn, flag);
} else {
+ spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn,
"flush kernel conn cleanup.\n");
flush_work(&conn->cleanup_work);
@@ -2270,31 +2301,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
* Only clear for recovery to avoid extra cleanup runs during
* termination.
*/
+ spin_lock_irq(&conn->lock);
clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ spin_unlock_irq(&conn->lock);
}
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
return 0;
}
-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
-{
- struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
- struct iscsi_endpoint *ep;
-
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
- conn->state = ISCSI_CONN_FAILED;
-
- if (!conn->ep || !session->transport->ep_disconnect)
- return;
-
- ep = conn->ep;
- conn->ep = NULL;
-
- session->transport->unbind_conn(conn, is_active);
- session->transport->ep_disconnect(ep);
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
-}
-
static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2303,18 +2317,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
mutex_lock(&conn->ep_mutex);
/*
- * If we are not at least bound there is nothing for us to do. Userspace
- * will do a ep_disconnect call if offload is used, but will not be
- * doing a stop since there is nothing to clean up, so we have to clear
- * the cleanup bit here.
+ * Get a ref to the ep, so we don't release its ID until after
+ * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
*/
- if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
- ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
- clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
- mutex_unlock(&conn->ep_mutex);
- return;
- }
-
+ if (conn->ep)
+ get_device(&conn->ep->dev);
iscsi_ep_disconnect(conn, false);
if (system_state != SYSTEM_RUNNING) {
@@ -2332,6 +2339,55 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n");
}
+static int iscsi_iter_force_destroy_conn_fn(struct device *dev, void *data)
+{
+ struct iscsi_transport *transport;
+ struct iscsi_cls_conn *conn;
+
+ if (!iscsi_is_conn_dev(dev))
+ return 0;
+
+ conn = iscsi_dev_to_conn(dev);
+ transport = conn->transport;
+
+ if (READ_ONCE(conn->state) != ISCSI_CONN_DOWN)
+ iscsi_if_stop_conn(conn, STOP_CONN_TERM);
+
+ transport->destroy_conn(conn);
+ return 0;
+}
+
+/**
+ * iscsi_force_destroy_session - destroy a session from the kernel
+ * @session: session to destroy
+ *
+ * Force the destruction of a session from the kernel. This should only be
+ * used when userspace is no longer running during system shutdown.
+ */
+void iscsi_force_destroy_session(struct iscsi_cls_session *session)
+{
+ struct iscsi_transport *transport = session->transport;
+ unsigned long flags;
+
+ WARN_ON_ONCE(system_state == SYSTEM_RUNNING);
+
+ spin_lock_irqsave(&sesslock, flags);
+ if (list_empty(&session->sess_list)) {
+ spin_unlock_irqrestore(&sesslock, flags);
+ /*
+ * The conn/ep is already freed and the session is being torn down
+ * via the async path. For shutdown we don't care about it, so return.
+ */
+ return;
+ }
+ spin_unlock_irqrestore(&sesslock, flags);
+
+ device_for_each_child(&session->dev, NULL,
+ iscsi_iter_force_destroy_conn_fn);
+ transport->destroy_session(session);
+}
+EXPORT_SYMBOL_GPL(iscsi_force_destroy_session);
+
void iscsi_free_session(struct iscsi_cls_session *session)
{
ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
@@ -2341,27 +2397,16 @@ void iscsi_free_session(struct iscsi_cls_session *session)
EXPORT_SYMBOL_GPL(iscsi_free_session);
/**
- * iscsi_create_conn - create iscsi class connection
+ * iscsi_alloc_conn - alloc iscsi class connection
* @session: iscsi cls session
* @dd_size: private driver data size
* @cid: connection id
- *
- * This can be called from a LLD or iscsi_transport. The connection
- * is child of the session so cid must be unique for all connections
- * on the session.
- *
- * Since we do not support MCS, cid will normally be zero. In some cases
- * for software iscsi we could be trying to preallocate a connection struct
- * in which case there could be two connection structs and cid would be
- * non-zero.
*/
struct iscsi_cls_conn *
-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
{
struct iscsi_transport *transport = session->transport;
struct iscsi_cls_conn *conn;
- unsigned long flags;
- int err;
conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
if (!conn)
@@ -2370,59 +2415,73 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
+ spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
goto free_conn;
dev_set_name(&conn->dev, "connection%d:%u", session->sid, cid);
+ device_initialize(&conn->dev);
conn->dev.parent = &session->dev;
conn->dev.release = iscsi_conn_release;
- err = device_register(&conn->dev);
+
+ return conn;
+
+free_conn:
+ kfree(conn);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_alloc_conn);
+
+/**
+ * iscsi_add_conn - add iscsi class connection
+ * @conn: iscsi cls connection
+ *
+ * This will expose iscsi_cls_conn to sysfs, so make sure the related
+ * resources for sysfs attributes are initialized before calling this.
+ */
+int iscsi_add_conn(struct iscsi_cls_conn *conn)
+{
+ int err;
+ unsigned long flags;
+ struct iscsi_cls_session *session = iscsi_dev_to_session(conn->dev.parent);
+
+ err = device_add(&conn->dev);
if (err) {
- iscsi_cls_session_printk(KERN_ERR, session, "could not "
- "register connection's dev\n");
- goto release_parent_ref;
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register connection's dev\n");
+ return err;
}
err = transport_register_device(&conn->dev);
if (err) {
- iscsi_cls_session_printk(KERN_ERR, session, "could not "
- "register transport's dev\n");
- goto release_conn_ref;
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register transport's dev\n");
+ device_del(&conn->dev);
+ return err;
}
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
spin_unlock_irqrestore(&connlock, flags);
- ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
- return conn;
-
-release_conn_ref:
- device_unregister(&conn->dev);
- put_device(&session->dev);
- return NULL;
-release_parent_ref:
- put_device(&session->dev);
-free_conn:
- kfree(conn);
- return NULL;
+ return 0;
}
-
-EXPORT_SYMBOL_GPL(iscsi_create_conn);
+EXPORT_SYMBOL_GPL(iscsi_add_conn);
/**
- * iscsi_destroy_conn - destroy iscsi class connection
- * @conn: iscsi cls session
+ * iscsi_remove_conn - remove iscsi class connection from sysfs
+ * @conn: iscsi cls connection
*
- * This can be called from a LLD or iscsi_transport.
+ * Remove iscsi_cls_conn from sysfs, and wait for previous
+ * read/write of iscsi_cls_conn's attributes in sysfs to finish.
*/
-int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+void iscsi_remove_conn(struct iscsi_cls_conn *conn)
{
unsigned long flags;
@@ -2431,11 +2490,9 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
spin_unlock_irqrestore(&connlock, flags);
transport_unregister_device(&conn->dev);
- ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n");
- device_unregister(&conn->dev);
- return 0;
+ device_del(&conn->dev);
}
-EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+EXPORT_SYMBOL_GPL(iscsi_remove_conn);
void iscsi_put_conn(struct iscsi_cls_conn *conn)
{
@@ -2561,9 +2618,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
+ unsigned long flags;
+ int state;
- if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
- queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+ spin_lock_irqsave(&conn->lock, flags);
+ /*
+ * Userspace will only do a stop call if we are at least bound, and we
+ * only need to do the in-kernel cleanup if in the UP state, so that
+ * cmds can be released to upper layers. In other states, just wait for
+ * userspace to avoid races that can leave the cleanup_work queued.
+ */
+ state = READ_ONCE(conn->state);
+ switch (state) {
+ case ISCSI_CONN_BOUND:
+ case ISCSI_CONN_UP:
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+ &conn->flags)) {
+ queue_work(iscsi_conn_cleanup_workq,
+ &conn->cleanup_work);
+ }
+ break;
+ default:
+ ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+ state);
+ break;
+ }
+ spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2913,7 +2993,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
- int err = 0, value = 0;
+ int err = 0, value = 0, state;
if (ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
@@ -2930,8 +3010,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
- if ((conn->state == ISCSI_CONN_BOUND) ||
- (conn->state == ISCSI_CONN_UP)) {
+ state = READ_ONCE(conn->state);
+ if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
} else {
@@ -3003,16 +3083,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
}
mutex_lock(&conn->ep_mutex);
- /* Check if this was a conn error and the kernel took ownership */
- if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
- ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
- mutex_unlock(&conn->ep_mutex);
-
- flush_work(&conn->cleanup_work);
- goto put_ep;
- }
-
- iscsi_ep_disconnect(conn, false);
+ iscsi_if_disconnect_bound_ep(conn, ep, false);
mutex_unlock(&conn->ep_mutex);
put_ep:
iscsi_put_endpoint(ep);
@@ -3688,7 +3759,12 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_DESTROY_CONN:
return iscsi_if_destroy_conn(transport, ev);
case ISCSI_UEVENT_STOP_CONN:
- return iscsi_if_stop_conn(transport, ev);
+ conn = iscsi_conn_lookup(ev->u.stop_conn.sid,
+ ev->u.stop_conn.cid);
+ if (!conn)
+ return -EINVAL;
+
+ return iscsi_if_stop_conn(conn, ev->u.stop_conn.flag);
}
/*
@@ -3715,24 +3791,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
return -EINVAL;
mutex_lock(&conn->ep_mutex);
+ spin_lock_irq(&conn->lock);
if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
mutex_unlock(&conn->ep_mutex);
ev->r.retcode = -ENOTCONN;
return 0;
}
+ spin_unlock_irq(&conn->lock);
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_BIND_CONN:
- if (conn->ep) {
- /*
- * For offload boot support where iscsid is restarted
- * during the pivot root stage, the ep will be intact
- * here when the new iscsid instance starts up and
- * reconnects.
- */
- iscsi_ep_disconnect(conn, true);
- }
-
session = iscsi_session_lookup(ev->u.b_conn.sid);
if (!session) {
err = -EINVAL;
@@ -3743,7 +3812,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_BOUND;
+ WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3762,7 +3831,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_START_CONN:
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_UP;
+ WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
break;
case ISCSI_UEVENT_SEND_PDU:
pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -3866,8 +3936,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_UNBIND_SESSION:
session = iscsi_session_lookup(ev->u.d_session.sid);
if (session)
- scsi_queue_work(iscsi_session_to_shost(session),
- &session->unbind_work);
+ queue_work(session->workq, &session->unbind_work);
else
err = -EINVAL;
break;
@@ -4070,10 +4139,11 @@ static ssize_t show_conn_state(struct device *dev,
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
const char *state = "unknown";
+ int conn_state = READ_ONCE(conn->state);
- if (conn->state >= 0 &&
- conn->state < ARRAY_SIZE(connection_state_names))
- state = connection_state_names[conn->state];
+ if (conn_state >= 0 &&
+ conn_state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn_state];
return sysfs_emit(buf, "%s\n", state);
}
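
conn->state is now written with WRITE_ONCE() and sampled with READ_ONCE() because sysfs readers like show_conn_state() run without the connection lock; the reader takes one snapshot and works only with that. The pattern in miniature:

#include <linux/compiler.h>

enum example_state { EX_DOWN, EX_UP, EX_FAILED };

static int example_state_var;	/* written by the control path */

static void example_mark_failed(void)
{
	WRITE_ONCE(example_state_var, EX_FAILED);
}

static const char *example_show_state(void)
{
	int state = READ_ONCE(example_state_var);	/* one snapshot */

	switch (state) {
	case EX_UP:
		return "up";
	case EX_FAILED:
		return "failed";
	default:
		return "down";
	}
}
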
@@ -4740,7 +4810,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
INIT_LIST_HEAD(&priv->list);
priv->iscsi_transport = tt;
priv->t.user_scan = iscsi_user_scan;
- priv->t.create_work_queue = 1;
priv->dev.class = &iscsi_transport_class;
dev_set_name(&priv->dev, "%s", tt->name);
@@ -4787,7 +4856,7 @@ free_priv:
}
EXPORT_SYMBOL_GPL(iscsi_register_transport);
-int iscsi_unregister_transport(struct iscsi_transport *tt)
+void iscsi_unregister_transport(struct iscsi_transport *tt)
{
struct iscsi_internal *priv;
unsigned long flags;
@@ -4810,8 +4879,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
device_unregister(&priv->dev);
mutex_unlock(&rx_queue_mutex);
-
- return 0;
}
EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
@@ -4875,26 +4942,16 @@ static __init int iscsi_transport_init(void)
goto unregister_flashnode_bus;
}
- iscsi_eh_timer_workq = alloc_workqueue("%s",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, "iscsi_eh");
- if (!iscsi_eh_timer_workq) {
- err = -ENOMEM;
- goto release_nls;
- }
-
iscsi_conn_cleanup_workq = alloc_workqueue("%s",
WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
"iscsi_conn_cleanup");
if (!iscsi_conn_cleanup_workq) {
err = -ENOMEM;
- goto destroy_wq;
+ goto release_nls;
}
return 0;
-destroy_wq:
- destroy_workqueue(iscsi_eh_timer_workq);
release_nls:
netlink_kernel_release(nls);
unregister_flashnode_bus:
@@ -4917,7 +4974,6 @@ unregister_transport_class:
static void __exit iscsi_transport_exit(void)
{
destroy_workqueue(iscsi_conn_cleanup_workq);
- destroy_workqueue(iscsi_eh_timer_workq);
netlink_kernel_release(nls);
bus_unregister(&iscsi_flashnode_bus);
transport_class_unregister(&iscsi_connection_class);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 4ee578b181da..74b99f2b0b74 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -34,7 +34,6 @@
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_request.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -226,6 +225,7 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
{
struct Scsi_Host *shost = dev_to_shost(dev);
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+ struct device *dma_dev = shost->dma_dev;
INIT_LIST_HEAD(&sas_host->rphy_list);
mutex_init(&sas_host->lock);
@@ -237,6 +237,11 @@ static int sas_host_setup(struct transport_container *tc, struct device *dev,
dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n",
shost->host_no);
+ if (dma_dev->dma_mask) {
+ shost->opt_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT);
+ }
+
return 0;
}
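
The new clamp caps opt_sectors at the DMA layer's optimal mapping size. As a worked example, if dma_opt_mapping_size() reports 128 KiB, the cap works out to 131072 >> 9 = 256 sectors. The arithmetic in isolation:

#include <linux/minmax.h>
#include <linux/types.h>

static unsigned int example_opt_sectors(unsigned int max_sectors,
					size_t opt_mapping_size)
{
	/* 128 KiB mapping -> min(max_sectors, 256) */
	return min_t(unsigned int, max_sectors,
		     opt_mapping_size >> 9 /* SECTOR_SHIFT */);
}
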
@@ -717,12 +722,17 @@ int sas_phy_add(struct sas_phy *phy)
int error;
error = device_add(&phy->dev);
- if (!error) {
- transport_add_device(&phy->dev);
- transport_configure_device(&phy->dev);
+ if (error)
+ return error;
+
+ error = transport_add_device(&phy->dev);
+ if (error) {
+ device_del(&phy->dev);
+ return error;
}
+ transport_configure_device(&phy->dev);
- return error;
+ return 0;
}
EXPORT_SYMBOL(sas_phy_add);
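
sas_phy_add() now follows the usual unwind-on-failure idiom: each setup step that can fail undoes the previously completed steps before propagating the error. A schematic userspace model (step_a/step_b and the undo helper are placeholders, not kernel APIs):

#include <stdio.h>

static int step_a(void) { return 0; }
static void undo_a(void) { puts("undo_a"); }
static int step_b(void) { return -1; }   /* simulate a failure */

static int add_object(void)
{
        int error;

        error = step_a();
        if (error)
                return error;

        error = step_b();
        if (error) {
                undo_a();                /* roll back step_a on failure */
                return error;
        }
        return 0;
}

int main(void)
{
        printf("add_object() = %d\n", add_object());
        return 0;
}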
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index bd72c38d7bfc..f569cf0095c2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -998,8 +998,9 @@ void
spi_dv_device(struct scsi_device *sdev)
{
struct scsi_target *starget = sdev->sdev_target;
- u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ unsigned int sleep_flags;
+ u8 *buffer;
/*
* Because this function and the power management code both call
@@ -1007,7 +1008,7 @@ spi_dv_device(struct scsi_device *sdev)
* while suspend or resume is in progress. Hence the
* lock/unlock_system_sleep() calls.
*/
- lock_system_sleep();
+ sleep_flags = lock_system_sleep();
if (scsi_autopm_get_device(sdev))
goto unlock_system_sleep;
@@ -1058,7 +1059,7 @@ put_autopm:
scsi_autopm_put_device(sdev);
unlock_system_sleep:
- unlock_system_sleep();
+ unlock_system_sleep(sleep_flags);
}
EXPORT_SYMBOL(spi_dv_device);
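
The spi_dv_device() change adapts to the new lock_system_sleep() calling convention: the lock call returns a cookie that must be handed back to the matching unlock call. A userspace model of the save/restore shape (the lock internals are stubbed):

#include <stdio.h>

static unsigned int lock_system_sleep_stub(void)
{
        unsigned int saved_flags = 0x1;  /* pretend snapshot of task flags */
        /* ... take the system sleep lock ... */
        return saved_flags;
}

static void unlock_system_sleep_stub(unsigned int saved_flags)
{
        /* ... release the lock, then restore the saved flags ... */
        printf("restored flags 0x%x\n", saved_flags);
}

int main(void)
{
        unsigned int flags = lock_system_sleep_stub();
        /* critical section */
        unlock_system_sleep_stub(flags);
        return 0;
}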
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 0ffdb8f2995f..e2c7d8ef205f 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
@@ -35,15 +34,14 @@ unsigned char *scsi_bios_ptable(struct block_device *dev)
{
struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping;
unsigned char *res = NULL;
- struct page *page;
+ struct folio *folio;
- page = read_mapping_page(mapping, 0, NULL);
- if (IS_ERR(page))
+ folio = read_mapping_folio(mapping, 0, NULL);
+ if (IS_ERR(folio))
return NULL;
- if (!PageError(page))
- res = kmemdup(page_address(page) + 0x1be, 66, GFP_KERNEL);
- put_page(page);
+ res = kmemdup(folio_address(folio) + 0x1be, 66, GFP_KERNEL);
+ folio_put(folio);
return res;
}
EXPORT_SYMBOL(scsi_bios_ptable);
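
The 66 bytes copied from offset 0x1be above are the tail of a classic MBR: four 16-byte partition entries followed by the 2-byte 0x55 0xAA boot signature ending at byte 511. A standalone check over a synthetic sector:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char sector[512];
        unsigned char ptable[66];

        memset(sector, 0, sizeof(sector));
        sector[510] = 0x55;              /* signature low byte */
        sector[511] = 0xaa;              /* signature high byte */

        memcpy(ptable, sector + 0x1be, sizeof(ptable));
        /* 0x1be + 4 * 16 = 0x1fe, so ptable bytes 64..65 are the signature */
        printf("valid MBR signature: %s\n",
               (ptable[64] == 0x55 && ptable[65] == 0xaa) ? "yes" : "no");
        return 0;
}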
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 65875a598d62..eb76ba055021 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -38,7 +38,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
-#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
@@ -51,7 +50,6 @@
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
-#include <linux/async.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
@@ -123,13 +121,7 @@ static void scsi_disk_release(struct device *cdev);
static DEFINE_IDA(sd_index_ida);
-/* This semaphore is used to mediate the 0->1 reference get in the
- * face of object destruction (i.e. we can't allow a get on an
- * object after last put) */
-static DEFINE_MUTEX(sd_ref_mutex);
-
static struct kmem_cache *sd_cdb_cache;
-static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;
static struct lock_class_key sd_bio_compl_lkclass;
@@ -210,7 +202,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
*/
data.device_specific = 0;
- if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
+ if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
@@ -665,33 +657,6 @@ static int sd_major(int major_idx)
}
}
-static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
-{
- struct scsi_disk *sdkp = NULL;
-
- mutex_lock(&sd_ref_mutex);
-
- if (disk->private_data) {
- sdkp = scsi_disk(disk);
- if (scsi_device_get(sdkp->device) == 0)
- get_device(&sdkp->dev);
- else
- sdkp = NULL;
- }
- mutex_unlock(&sd_ref_mutex);
- return sdkp;
-}
-
-static void scsi_disk_put(struct scsi_disk *sdkp)
-{
- struct scsi_device *sdev = sdkp->device;
-
- mutex_lock(&sd_ref_mutex);
- put_device(&sdkp->dev);
- scsi_device_put(sdev);
- mutex_unlock(&sd_ref_mutex);
-}
-
#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
size_t len, bool send)
@@ -832,7 +797,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
case SD_LBP_FULL:
case SD_LBP_DISABLE:
blk_queue_max_discard_sectors(q, 0);
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
return;
case SD_LBP_UNMAP:
@@ -865,14 +829,13 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
}
blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
unsigned int data_len = 24;
@@ -908,7 +871,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -940,7 +903,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -971,7 +934,7 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1036,13 +999,13 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
* Reporting a maximum number of blocks that is not aligned
* on the device physical size would cause a large write same
* request to be split into physically unaligned chunks by
- * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
- * even if the caller of these functions took care to align the
- * large request. So make sure the maximum reported is aligned
- * to the device physical block size. This is only an optional
- * optimization for regular disks, but this is mandatory to
- * avoid failure of large write same requests directed at
- * sequential write required zones of host-managed ZBC disks.
+ * __blkdev_issue_write_zeroes() even if the caller of this
+	 * function took care to align the large request. So make sure
+ * the maximum reported is aligned to the device physical block
+ * size. This is only an optional optimization for regular
+ * disks, but this is mandatory to avoid failure of large write
+ * same requests directed at sequential write required zones of
+ * host-managed ZBC disks.
*/
sdkp->max_ws_blocks =
round_down(sdkp->max_ws_blocks,
@@ -1051,72 +1014,14 @@ static void sd_config_write_same(struct scsi_disk *sdkp)
}
out:
- blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
- (logical_block_size >> 9));
blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
(logical_block_size >> 9));
}
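
A worked example of the alignment the comment above describes: the reported write-same/write-zeroes block count is rounded down to a multiple of the physical block size expressed in logical blocks (numbers below assume 512 B logical / 4096 B physical sectors):

#include <stdio.h>

static unsigned int round_down_pow2(unsigned int x, unsigned int mult)
{
        return x & ~(mult - 1);   /* mult must be a power of two */
}

int main(void)
{
        unsigned int phys_in_logical = 4096 / 512;   /* 8 logical blocks */
        unsigned int max_ws_blocks = 65535;          /* device-reported limit */

        printf("aligned max_ws_blocks = %u\n",
               round_down_pow2(max_ws_blocks, phys_in_logical)); /* 65528 */
        return 0;
}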
-/**
- * sd_setup_write_same_cmnd - write the same data to multiple blocks
- * @cmd: command to prepare
- *
- * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
- * the preference indicated by the target device.
- **/
-static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
-{
- struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- struct bio *bio = rq->bio;
- u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
- u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
- blk_status_t ret;
-
- if (sdkp->device->no_write_same)
- return BLK_STS_TARGET;
-
- BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
-
- rq->timeout = SD_WRITE_SAME_TIMEOUT;
-
- if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
- cmd->cmd_len = 16;
- cmd->cmnd[0] = WRITE_SAME_16;
- put_unaligned_be64(lba, &cmd->cmnd[2]);
- put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
- } else {
- cmd->cmd_len = 10;
- cmd->cmnd[0] = WRITE_SAME;
- put_unaligned_be32(lba, &cmd->cmnd[2]);
- put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
- }
-
- cmd->transfersize = sdp->sector_size;
- cmd->allowed = sdkp->max_retries;
-
- /*
- * For WRITE SAME the data transferred via the DATA OUT buffer is
- * different from the amount of data actually written to the target.
- *
- * We set up __data_len to the amount of data transferred via the
- * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
- * to transfer a single sector of data first, but then reset it to
- * the amount of data to be written right after so that the I/O path
- * knows how much to actually write.
- */
- rq->__data_len = sdp->sector_size;
- ret = scsi_alloc_sgtables(cmd);
- rq->__data_len = blk_rq_bytes(rq);
-
- return ret;
-}
-
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
/* flush requests don't perform I/O, zero the S/G table */
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
@@ -1134,13 +1039,7 @@ static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
sector_t lba, unsigned int nr_blocks,
unsigned char flags)
{
- cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
- if (unlikely(cmd->cmnd == NULL))
- return BLK_STS_RESOURCE;
-
cmd->cmd_len = SD_EXT_CDB_SIZE;
- memset(cmd->cmnd, 0, cmd->cmd_len);
-
cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
cmd->cmnd[7] = 0x18; /* Additional CDB len */
cmd->cmnd[9] = write ? WRITE_32 : READ_32;
@@ -1215,7 +1114,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sector_t threshold;
unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1236,7 +1135,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
goto fail;
}
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
goto fail;
}
@@ -1331,7 +1230,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
switch (req_op(rq)) {
case REQ_OP_DISCARD:
- switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
+ switch (scsi_disk(rq->q->disk)->provisioning_mode) {
case SD_LBP_UNMAP:
return sd_setup_unmap_cmnd(cmd);
case SD_LBP_WS16:
@@ -1345,8 +1244,6 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
}
case REQ_OP_WRITE_ZEROES:
return sd_setup_write_zeroes_cmnd(cmd);
- case REQ_OP_WRITE_SAME:
- return sd_setup_write_same_cmnd(cmd);
case REQ_OP_FLUSH:
return sd_setup_flush_cmnd(cmd);
case REQ_OP_READ:
@@ -1374,17 +1271,9 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
struct request *rq = scsi_cmd_to_rq(SCpnt);
- u8 *cmnd;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
mempool_free(rq->special_vec.bv_page, sd_page_pool);
-
- if (SCpnt->cmnd != scsi_req(rq)->cmd) {
- cmnd = SCpnt->cmnd;
- SCpnt->cmnd = NULL;
- SCpnt->cmd_len = 0;
- mempool_free(cmnd, sd_cdb_pool);
- }
}
static bool sd_need_revalidate(struct block_device *bdev,
@@ -1420,17 +1309,15 @@ static bool sd_need_revalidate(struct block_device *bdev,
**/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
- struct scsi_device *sdev;
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
int retval;
- if (!sdkp)
+ if (scsi_device_get(sdev))
return -ENXIO;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
- sdev = sdkp->device;
-
/*
* If the device is in error recovery, wait until it is done.
* If the device is offline, then disallow any access to it.
@@ -1475,7 +1362,7 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
return 0;
error_out:
- scsi_disk_put(sdkp);
+ scsi_device_put(sdev);
return retval;
}
@@ -1504,7 +1391,7 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- scsi_disk_put(sdkp);
+ scsi_device_put(sdev);
}
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -1574,7 +1461,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
if (is_sed_ioctl(cmd))
return sed_ioctl(sdkp->opal_dev, cmd, p);
- return scsi_ioctl(sdp, disk, mode, cmd, p);
+ return scsi_ioctl(sdp, mode, cmd, p);
}
static void set_media_not_present(struct scsi_disk *sdkp)
@@ -1618,7 +1505,7 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_disk *sdkp = disk->private_data;
struct scsi_device *sdp;
int retval;
bool disk_changed;
@@ -1681,7 +1568,6 @@ out:
*/
disk_changed = sdp->changed;
sdp->changed = 0;
- scsi_disk_put(sdkp);
return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
@@ -1889,6 +1775,13 @@ static const struct pr_ops sd_pr_ops = {
.pr_clear = sd_pr_clear,
};
+static void scsi_disk_free_disk(struct gendisk *disk)
+{
+ struct scsi_disk *sdkp = scsi_disk(disk);
+
+ put_device(&sdkp->disk_dev);
+}
+
static const struct block_device_operations sd_fops = {
.owner = THIS_MODULE,
.open = sd_open,
@@ -1900,6 +1793,7 @@ static const struct block_device_operations sd_fops = {
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
.get_unique_id = sd_get_unique_id,
+ .free_disk = scsi_disk_free_disk,
.pr_ops = &sd_pr_ops,
};
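
Userspace model of the lifetime rule behind the new free_disk op: the gendisk holds one reference on the driver object, and that reference is dropped from the free_disk callback when the last disk reference goes away. All refcounting below is stubbed with plain counters:

#include <stdio.h>

struct obj { int refs; };

static void put_obj(struct obj *o)
{
        if (--o->refs == 0)
                puts("driver object freed");
}

static void free_disk_cb(struct obj *driver_obj)
{
        put_obj(driver_obj);     /* mirrors put_device(&sdkp->disk_dev) */
}

static void put_disk_stub(struct obj *driver_obj, int *disk_refs)
{
        if (--*disk_refs == 0)
                free_disk_cb(driver_obj);
}

int main(void)
{
        struct obj drv = { .refs = 1 };
        int disk_refs = 2;

        put_disk_stub(&drv, &disk_refs);  /* opener closes: nothing freed */
        put_disk_stub(&drv, &disk_refs);  /* last ref: free_disk runs */
        return 0;
}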
@@ -1917,7 +1811,7 @@ static const struct block_device_operations sd_fops = {
**/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
- struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
/* New SCSI EH run, reset gate variable */
sdkp->ignore_medium_access_errors = false;
@@ -1937,7 +1831,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
**/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
- struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
struct scsi_device *sdev = scmd->device;
if (!scsi_device_online(sdev) ||
@@ -2034,14 +1928,13 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int resid;
struct scsi_sense_hdr sshdr;
struct request *req = scsi_cmd_to_rq(SCpnt);
- struct scsi_disk *sdkp = scsi_disk(req->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(req->q->disk);
int sense_valid = 0;
int sense_deferred = 0;
switch (req_op(req)) {
case REQ_OP_DISCARD:
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
@@ -2281,40 +2174,48 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
{
struct scsi_device *sdp = sdkp->device;
u8 type;
- int ret = 0;
if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
sdkp->protection_type = 0;
- return ret;
+ return 0;
}
type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
- if (type > T10_PI_TYPE3_PROTECTION)
- ret = -ENODEV;
- else if (scsi_host_dif_capable(sdp->host, type))
- ret = 1;
-
- if (sdkp->first_scan || type != sdkp->protection_type)
- switch (ret) {
- case -ENODEV:
- sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
- " protection type %u. Disabling disk!\n",
- type);
- break;
- case 1:
- sd_printk(KERN_NOTICE, sdkp,
- "Enabling DIF Type %u protection\n", type);
- break;
- case 0:
- sd_printk(KERN_NOTICE, sdkp,
- "Disabling DIF Type %u protection\n", type);
- break;
- }
+ if (type > T10_PI_TYPE3_PROTECTION) {
+ sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
+ " protection type %u. Disabling disk!\n",
+ type);
+ sdkp->protection_type = 0;
+ return -ENODEV;
+ }
sdkp->protection_type = type;
- return ret;
+ return 0;
+}
+
+static void sd_config_protection(struct scsi_disk *sdkp)
+{
+ struct scsi_device *sdp = sdkp->device;
+
+ if (!sdkp->first_scan)
+ return;
+
+ sd_dif_config_host(sdkp);
+
+ if (!sdkp->protection_type)
+ return;
+
+ if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "Disabling DIF Type %u protection\n",
+ sdkp->protection_type);
+		sdkp->protection_type = 0;
+		return;
+	}
+
+ sd_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
+ sdkp->protection_type);
}
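
For reference, a worked example of the P_TYPE decode used in sd_read_protection_type() above: byte 12 of the READ CAPACITY(16) response carries PROT_EN in bit 0 and P_TYPE in bits 3:1, and the T10 PI type is P_TYPE + 1:

#include <stdio.h>

int main(void)
{
        unsigned char byte12 = 0x03;      /* PROT_EN = 1, P_TYPE = 1 */

        if ((byte12 & 1) == 0) {
                puts("protection disabled");
                return 0;
        }
        printf("DIF Type %u\n", ((byte12 >> 1) & 7) + 1);  /* Type 2 */
        return 0;
}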
static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
@@ -2948,40 +2849,37 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
- unsigned int sector_sz = sdkp->device->sector_size;
- const int vpd_len = 64;
- unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
+ struct scsi_vpd *vpd;
- if (!buffer ||
- /* Block Limits VPD */
- scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
- goto out;
+ rcu_read_lock();
- blk_queue_io_min(sdkp->disk->queue,
- get_unaligned_be16(&buffer[6]) * sector_sz);
+ vpd = rcu_dereference(sdkp->device->vpd_pgb0);
+ if (!vpd || vpd->len < 16)
+ goto out;
- sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
- sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
+ sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
+ sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
+ sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
- if (buffer[3] == 0x3c) {
+ if (vpd->len >= 64) {
unsigned int lba_count, desc_count;
- sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
+ sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
if (!sdkp->lbpme)
goto out;
- lba_count = get_unaligned_be32(&buffer[20]);
- desc_count = get_unaligned_be32(&buffer[24]);
+ lba_count = get_unaligned_be32(&vpd->data[20]);
+ desc_count = get_unaligned_be32(&vpd->data[24]);
if (lba_count && desc_count)
sdkp->max_unmap_blocks = lba_count;
- sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
+ sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
- if (buffer[32] & 0x80)
+ if (vpd->data[32] & 0x80)
sdkp->unmap_alignment =
- get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+ get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
@@ -3003,7 +2901,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
}
out:
- kfree(buffer);
+ rcu_read_unlock();
}
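
sd_read_block_limits() now reads the cached VPD page under RCU instead of issuing a fresh INQUIRY: the pointer is only dereferenced between rcu_read_lock() and rcu_read_unlock(), and the page length is validated before any field access. A schematic userspace model with stubbed RCU primitives (not kernel code):

#include <stdio.h>

#define rcu_read_lock_stub()    do { } while (0)
#define rcu_read_unlock_stub()  do { } while (0)

struct vpd_page { int len; unsigned char data[64]; };

static void read_limits(const struct vpd_page *pgb0)
{
        rcu_read_lock_stub();
        if (!pgb0 || pgb0->len < 16) {   /* missing or too short: bail out */
                rcu_read_unlock_stub();
                return;
        }
        printf("opt xfer field bytes 12..15: %02x %02x %02x %02x\n",
               pgb0->data[12], pgb0->data[13], pgb0->data[14], pgb0->data[15]);
        rcu_read_unlock_stub();
}

int main(void)
{
        struct vpd_page pg = { .len = 16 };

        read_limits(&pg);
        return 0;
}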
/**
@@ -3013,18 +2911,21 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
struct request_queue *q = sdkp->disk->queue;
- unsigned char *buffer;
+ struct scsi_vpd *vpd;
u16 rot;
- const int vpd_len = 64;
+ u8 zoned;
- buffer = kmalloc(vpd_len, GFP_KERNEL);
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb1);
- if (!buffer ||
- /* Block Device Characteristics VPD */
- scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
- goto out;
+ if (!vpd || vpd->len < 8) {
+ rcu_read_unlock();
+ return;
+ }
- rot = get_unaligned_be16(&buffer[4]);
+ rot = get_unaligned_be16(&vpd->data[4]);
+ zoned = (vpd->data[8] >> 4) & 3;
+ rcu_read_unlock();
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
@@ -3033,20 +2934,20 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (sdkp->device->type == TYPE_ZBC) {
/* Host-managed */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
} else {
- sdkp->zoned = (buffer[8] >> 4) & 3;
+ sdkp->zoned = zoned;
if (sdkp->zoned == 1) {
/* Host-aware */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
} else {
/* Regular disk or drive managed disk */
- blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
+ disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
}
}
if (!sdkp->first_scan)
- goto out;
+ return;
if (blk_queue_is_zoned(q)) {
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
@@ -3059,9 +2960,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
sd_printk(KERN_NOTICE, sdkp,
"Drive-managed SMR disk\n");
}
-
- out:
- kfree(buffer);
}
/**
@@ -3070,24 +2968,24 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
*/
static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
- unsigned char *buffer;
- const int vpd_len = 8;
+ struct scsi_vpd *vpd;
if (sdkp->lbpme == 0)
return;
- buffer = kmalloc(vpd_len, GFP_KERNEL);
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb2);
- if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
- goto out;
+ if (!vpd || vpd->len < 8) {
+ rcu_read_unlock();
+ return;
+ }
sdkp->lbpvpd = 1;
- sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
- sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
- sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
-
- out:
- kfree(buffer);
+ sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */
+ sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
+ sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
+ rcu_read_unlock();
}
static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
@@ -3101,8 +2999,7 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
}
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
- /* too large values might cause issues with arcmsr */
- int vpd_buf_len = 64;
+ struct scsi_vpd *vpd;
sdev->no_report_opcodes = 1;
@@ -3110,8 +3007,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
* CODES is unsupported and the device has an ATA
* Information VPD page (SAT).
*/
- if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (vpd)
sdev->no_write_same = 1;
+ rcu_read_unlock();
}
if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
@@ -3172,7 +3072,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp)
goto out;
/* We must have at least a 64B header and one 32B range descriptor */
- vpd_len = get_unaligned_be16(&buffer[2]) + 3;
+ vpd_len = get_unaligned_be16(&buffer[2]) + 4;
if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
sd_printk(KERN_ERR, sdkp,
"Invalid Concurrent Positioning Ranges VPD page\n");
@@ -3215,6 +3115,29 @@ out:
kfree(buffer);
}
+static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
+{
+ struct scsi_device *sdp = sdkp->device;
+ unsigned int min_xfer_bytes =
+ logical_to_bytes(sdp, sdkp->min_xfer_blocks);
+
+ if (sdkp->min_xfer_blocks == 0)
+ return false;
+
+ if (min_xfer_bytes & (sdkp->physical_block_size - 1)) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Preferred minimum I/O size %u bytes not a " \
+ "multiple of physical block size (%u bytes)\n",
+ min_xfer_bytes, sdkp->physical_block_size);
+ sdkp->min_xfer_blocks = 0;
+ return false;
+ }
+
+ sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
+ min_xfer_bytes);
+ return true;
+}
+
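The masking check in sd_validate_min_xfer_size() relies on the physical block size being a power of two: x & (p - 1) is nonzero exactly when x is not a multiple of p. A quick standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int p = 4096;            /* physical block size, power of two */
        unsigned int ok = 8192, bad = 6144;

        printf("%u vs %u: %s\n", ok, p, (ok & (p - 1)) ? "misaligned" : "aligned");
        printf("%u vs %u: %s\n", bad, p, (bad & (p - 1)) ? "misaligned" : "aligned");
        return 0;
}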
/*
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, not a
@@ -3226,6 +3149,8 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
struct scsi_device *sdp = sdkp->device;
unsigned int opt_xfer_bytes =
logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ unsigned int min_xfer_bytes =
+ logical_to_bytes(sdp, sdkp->min_xfer_blocks);
if (sdkp->opt_xfer_blocks == 0)
return false;
@@ -3254,6 +3179,15 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
return false;
}
+ if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
+ sd_first_printk(KERN_WARNING, sdkp,
+ "Optimal transfer size %u bytes not a " \
+ "multiple of preferred minimum block " \
+ "size (%u bytes)\n",
+ opt_xfer_bytes, min_xfer_bytes);
+ return false;
+ }
+
if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
sd_first_printk(KERN_WARNING, sdkp,
"Optimal transfer size %u bytes not a " \
@@ -3321,6 +3255,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
+ sd_read_cpr(sdkp);
}
sd_print_capacity(sdkp, old_capacity);
@@ -3330,7 +3265,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
- sd_read_cpr(sdkp);
+ sd_config_protection(sdkp);
}
/*
@@ -3346,6 +3281,12 @@ static int sd_revalidate_disk(struct gendisk *disk)
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+ if (sd_validate_min_xfer_size(sdkp))
+ blk_queue_io_min(sdkp->disk->queue,
+ logical_to_bytes(sdp, sdkp->min_xfer_blocks));
+ else
+ blk_queue_io_min(sdkp->disk->queue, 0);
+
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
@@ -3355,6 +3296,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
(sector_t)BLK_DEF_MAX_SECTORS);
}
+ /*
+	 * Limit the default to the SCSI host's optimal sector limit, if set.
+	 * There may be a performance impact when the size of a request
+	 * exceeds this host limit.
+ */
+ rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);
+
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
@@ -3499,8 +3447,8 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
- &sd_bio_compl_lkclass);
+ gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
+ &sd_bio_compl_lkclass);
if (!gd)
goto out_free;
@@ -3517,7 +3465,6 @@ static int sd_probe(struct device *dev)
}
sdkp->device = sdp;
- sdkp->driver = &sd_template;
sdkp->disk = gd;
sdkp->index = index;
sdkp->max_retries = SD_MAX_RETRIES;
@@ -3532,14 +3479,14 @@ static int sd_probe(struct device *dev)
SD_MOD_TIMEOUT);
}
- device_initialize(&sdkp->dev);
- sdkp->dev.parent = get_device(dev);
- sdkp->dev.class = &sd_disk_class;
- dev_set_name(&sdkp->dev, "%s", dev_name(dev));
+ device_initialize(&sdkp->disk_dev);
+ sdkp->disk_dev.parent = get_device(dev);
+ sdkp->disk_dev.class = &sd_disk_class;
+ dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));
- error = device_add(&sdkp->dev);
+ error = device_add(&sdkp->disk_dev);
if (error) {
- put_device(&sdkp->dev);
+ put_device(&sdkp->disk_dev);
goto out;
}
@@ -3550,7 +3497,7 @@ static int sd_probe(struct device *dev)
gd->minors = SD_MINORS;
gd->fops = &sd_fops;
- gd->private_data = &sdkp->driver;
+ gd->private_data = sdkp;
/* defaults, until the device tells us otherwise */
sdp->sector_size = 512;
@@ -3566,7 +3513,6 @@ static int sd_probe(struct device *dev)
sd_revalidate_disk(gd);
- gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
gd->events |= DISK_EVENT_MEDIA_CHANGE;
@@ -3581,15 +3527,11 @@ static int sd_probe(struct device *dev)
error = device_add_disk(dev, gd, NULL);
if (error) {
- put_device(&sdkp->dev);
+ put_device(&sdkp->disk_dev);
+ put_disk(gd);
goto out;
}
- if (sdkp->capacity)
- sd_dif_config_host(sdkp);
-
- sd_revalidate_disk(gd);
-
if (sdkp->security) {
sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
if (sdkp->opal_dev)
@@ -3607,7 +3549,6 @@ static int sd_probe(struct device *dev)
out_put:
put_disk(gd);
out_free:
- sd_zbc_release_disk(sdkp);
kfree(sdkp);
out:
scsi_autopm_put_device(sdp);
@@ -3627,58 +3568,26 @@ static int sd_probe(struct device *dev)
**/
static int sd_remove(struct device *dev)
{
- struct scsi_disk *sdkp;
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
- sdkp = dev_get_drvdata(dev);
scsi_autopm_get_device(sdkp->device);
- device_del(&sdkp->dev);
+ device_del(&sdkp->disk_dev);
del_gendisk(sdkp->disk);
sd_shutdown(dev);
- free_opal_dev(sdkp->opal_dev);
-
- mutex_lock(&sd_ref_mutex);
- dev_set_drvdata(dev, NULL);
- put_device(&sdkp->dev);
- mutex_unlock(&sd_ref_mutex);
-
+ put_disk(sdkp->disk);
return 0;
}
-/**
- * scsi_disk_release - Called to free the scsi_disk structure
- * @dev: pointer to embedded class device
- *
- * sd_ref_mutex must be held entering this routine. Because it is
- * called on last put, you should always use the scsi_disk_get()
- * scsi_disk_put() helpers which manipulate the semaphore directly
- * and never do a direct put_device.
- **/
static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- struct gendisk *disk = sdkp->disk;
- struct request_queue *q = disk->queue;
ida_free(&sd_index_ida, sdkp->index);
-
- /*
- * Wait until all requests that are in progress have completed.
- * This is necessary to avoid that e.g. scsi_end_request() crashes
- * due to clearing the disk->private_data pointer. Wait from inside
- * scsi_disk_release() instead of from sd_release() to avoid that
- * freezing and unfreezing the request queue affects user space I/O
- * in case multiple processes open a /dev/sd... node concurrently.
- */
- blk_mq_freeze_queue(q);
- blk_mq_unfreeze_queue(q);
-
- disk->private_data = NULL;
- put_disk(disk);
+ sd_zbc_free_zone_info(sdkp);
put_device(&sdkp->device->sdev_gendev);
-
- sd_zbc_release_disk(sdkp);
+ free_opal_dev(sdkp->opal_dev);
kfree(sdkp);
}
@@ -3754,7 +3663,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
return 0;
if (sdkp->WCE && sdkp->media_present) {
- sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+ if (!sdkp->device->silence_suspend)
+ sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
ret = sd_sync_cache(sdkp, &sshdr);
if (ret) {
@@ -3776,7 +3686,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
}
if (sdkp->device->manage_start_stop) {
- sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ if (!sdkp->device->silence_suspend)
+ sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
/* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
if (ignore_stop_errors)
@@ -3882,18 +3793,11 @@ static int __init init_sd(void)
goto err_out_class;
}
- sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
- if (!sd_cdb_pool) {
- printk(KERN_ERR "sd: can't init extended cdb pool\n");
- err = -ENOMEM;
- goto err_out_cache;
- }
-
sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
if (!sd_page_pool) {
printk(KERN_ERR "sd: can't init discard page pool\n");
err = -ENOMEM;
- goto err_out_ppool;
+ goto err_out_cache;
}
err = scsi_register_driver(&sd_template.gendrv);
@@ -3905,9 +3809,6 @@ static int __init init_sd(void)
err_out_driver:
mempool_destroy(sd_page_pool);
-err_out_ppool:
- mempool_destroy(sd_cdb_pool);
-
err_out_cache:
kmem_cache_destroy(sd_cdb_cache);
@@ -3931,7 +3832,6 @@ static void __exit exit_sd(void)
SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
scsi_unregister_driver(&sd_template.gendrv);
- mempool_destroy(sd_cdb_pool);
mempool_destroy(sd_page_pool);
kmem_cache_destroy(sd_cdb_cache);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 2e5932bde43d..5eea762f84d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -67,20 +67,43 @@ enum {
SD_ZERO_WS10_UNMAP, /* Use WRITE SAME(10) with UNMAP */
};
+/**
+ * struct zoned_disk_info - Specific properties of a ZBC SCSI device.
+ * @nr_zones: number of zones.
+ * @zone_blocks: number of logical blocks per zone.
+ *
+ * This data structure holds the ZBC SCSI device properties that are retrieved
+ * twice: once before the gendisk capacity is known and again after it is.
+ */
+struct zoned_disk_info {
+ u32 nr_zones;
+ u32 zone_blocks;
+};
+
struct scsi_disk {
- struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
- struct device dev;
+
+ /*
+ * disk_dev is used to show attributes in /sys/class/scsi_disk/,
+ * but otherwise not really needed. Do not use for refcounting.
+ */
+ struct device disk_dev;
struct gendisk *disk;
struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
- u32 nr_zones;
- u32 rev_nr_zones;
- u32 zone_blocks;
- u32 rev_zone_blocks;
+ /* Updated during revalidation before the gendisk capacity is known. */
+ struct zoned_disk_info early_zone_info;
+ /* Updated during revalidation after the gendisk capacity is known. */
+ struct zoned_disk_info zone_info;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
+ /*
+ * Either zero or a power of two. If not zero it means that the offset
+ * between zone starting LBAs is constant.
+ */
+ u32 zone_starting_lba_gran;
u32 *zones_wp_offset;
spinlock_t zones_wp_offset_lock;
u32 *rev_wp_offset;
@@ -91,6 +114,7 @@ struct scsi_disk {
atomic_t openers;
sector_t capacity; /* size in logical blocks */
int max_retries;
+ u32 min_xfer_blocks;
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
@@ -127,11 +151,11 @@ struct scsi_disk {
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
};
-#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
+#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
{
- return container_of(disk->private_data, struct scsi_disk, driver);
+ return disk->private_data;
}
#define sd_printk(prefix, sdsk, fmt, a...) \
@@ -217,8 +241,8 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
-void sd_zbc_release_disk(struct scsi_disk *sdkp);
-int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp);
+int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]);
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
unsigned char op, bool all);
@@ -232,10 +256,9 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
#else /* CONFIG_BLK_DEV_ZONED */
-static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {}
+static inline void sd_zbc_free_zone_info(struct scsi_disk *sdkp) {}
-static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
- unsigned char *buf)
+static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
return 0;
}
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 349950616adc..968993ee6d5d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -59,8 +59,6 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
bi.profile = &t10_pi_type1_crc;
bi.tuple_size = sizeof(struct t10_pi_tuple);
- sd_printk(KERN_NOTICE, sdkp,
- "Enabling DIX %s protection\n", bi.profile->name);
if (dif && type) {
bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
@@ -72,11 +70,11 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
bi.tag_size = sizeof(u16) + sizeof(u32);
else
bi.tag_size = sizeof(u16);
-
- sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
- bi.tag_size);
}
+ sd_printk(KERN_NOTICE, sdkp,
+ "Enabling DIX %s, application tag size %u bytes\n",
+ bi.profile->name, bi.tag_size);
out:
blk_integrity_register(disk, &bi);
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index ed06798983f8..bd15624c6322 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -20,6 +20,12 @@
#include "sd.h"
+/**
+ * sd_zbc_get_zone_wp_offset - Get zone write pointer offset.
+ * @zone: Zone for which to return the write pointer offset.
+ *
+ * Return: offset of the write pointer from the start of the zone.
+ */
static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
{
if (zone->type == ZBC_ZONE_TYPE_CONV)
@@ -44,13 +50,37 @@ static unsigned int sd_zbc_get_zone_wp_offset(struct blk_zone *zone)
}
}
-static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
+/* Whether or not a SCSI zone descriptor describes a gap zone. */
+static bool sd_zbc_is_gap_zone(const u8 buf[64])
+{
+ return (buf[0] & 0xf) == ZBC_ZONE_TYPE_GAP;
+}
+
+/**
+ * sd_zbc_parse_report - Parse a SCSI zone descriptor
+ * @sdkp: SCSI disk pointer.
+ * @buf: SCSI zone descriptor.
+ * @idx: Index of the zone relative to the first zone reported by the current
+ * sd_zbc_report_zones() call.
+ * @cb: Callback function pointer.
+ * @data: Second argument passed to @cb.
+ *
+ * Return: Value returned by @cb.
+ *
+ * Convert a SCSI zone descriptor into struct blk_zone format. Additionally,
+ * call @cb(blk_zone, @data).
+ */
+static int sd_zbc_parse_report(struct scsi_disk *sdkp, const u8 buf[64],
unsigned int idx, report_zones_cb cb, void *data)
{
struct scsi_device *sdp = sdkp->device;
struct blk_zone zone = { 0 };
+ sector_t start_lba, gran;
int ret;
+ if (WARN_ON_ONCE(sd_zbc_is_gap_zone(buf)))
+ return -EINVAL;
+
zone.type = buf[0] & 0x0f;
zone.cond = (buf[1] >> 4) & 0xf;
if (buf[1] & 0x01)
@@ -58,13 +88,31 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
if (buf[1] & 0x02)
zone.non_seq = 1;
- zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
- zone.capacity = zone.len;
- zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
- zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- if (zone.type != ZBC_ZONE_TYPE_CONV &&
- zone.cond == ZBC_ZONE_COND_FULL)
+ start_lba = get_unaligned_be64(&buf[16]);
+ zone.start = logical_to_sectors(sdp, start_lba);
+ zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+ zone.len = zone.capacity;
+ if (sdkp->zone_starting_lba_gran) {
+ gran = logical_to_sectors(sdp, sdkp->zone_starting_lba_gran);
+ if (zone.len > gran) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid zone at LBA %llu with capacity %llu and length %llu; granularity = %llu\n",
+ start_lba,
+ sectors_to_logical(sdp, zone.capacity),
+ sectors_to_logical(sdp, zone.len),
+ sectors_to_logical(sdp, gran));
+ return -EINVAL;
+ }
+ /*
+ * Use the starting LBA granularity instead of the zone length
+ * obtained from the REPORT ZONES command.
+ */
+ zone.len = gran;
+ }
+ if (zone.cond == ZBC_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
+ else
+ zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
ret = cb(&zone, idx, data);
if (ret)
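
The length/capacity split above is the interesting part: when the device reports a constant zone starting LBA granularity, zone.len is stretched to that granularity while zone.capacity keeps the usable size from REPORT ZONES, and any zone longer than the granularity is rejected. A worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
        unsigned long long gran = 524288;     /* zone_starting_lba_gran, blocks */
        unsigned long long reported = 524160; /* usable blocks from REPORT ZONES */

        if (reported > gran) {
                puts("invalid zone: longer than the LBA granularity");
                return 1;
        }
        printf("capacity=%llu len=%llu\n", reported, gran);
        return 0;
}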
@@ -161,7 +209,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
*/
- nr_zones = min(nr_zones, sdkp->nr_zones);
+ nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
@@ -186,16 +234,28 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
*/
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
- return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+ return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks);
}
+/**
+ * sd_zbc_report_zones - SCSI .report_zones() callback.
+ * @disk: Disk to report zones for.
+ * @sector: Start sector.
+ * @nr_zones: Maximum number of zones to report.
+ * @cb: Callback function called to report zone information.
+ * @data: Second argument passed to @cb.
+ *
+ * Called by the block layer to iterate over zone information. See also the
+ * disk->fops->report_zones() calls in block/blk-zoned.c.
+ */
int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct scsi_disk *sdkp = scsi_disk(disk);
- sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
+ sector_t lba = sectors_to_logical(sdkp->device, sector);
unsigned int nr, i;
unsigned char *buf;
+ u64 zone_length, start_lba;
size_t offset, buflen = 0;
int zone_idx = 0;
int ret;
@@ -204,7 +264,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
/* Not a zoned device */
return -EOPNOTSUPP;
- if (!capacity)
+ if (!sdkp->capacity)
/* Device gone or invalid */
return -ENODEV;
@@ -212,9 +272,8 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
if (!buf)
return -ENOMEM;
- while (zone_idx < nr_zones && sector < capacity) {
- ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
- sectors_to_logical(sdkp->device, sector), true);
+ while (zone_idx < nr_zones && lba < sdkp->capacity) {
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen, lba, true);
if (ret)
goto out;
@@ -225,14 +284,36 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
for (i = 0; i < nr && zone_idx < nr_zones; i++) {
offset += 64;
+ start_lba = get_unaligned_be64(&buf[offset + 16]);
+ zone_length = get_unaligned_be64(&buf[offset + 8]);
+ if ((zone_idx == 0 &&
+ (lba < start_lba ||
+ lba >= start_lba + zone_length)) ||
+ (zone_idx > 0 && start_lba != lba) ||
+ start_lba + zone_length < start_lba) {
+ sd_printk(KERN_ERR, sdkp,
+ "Zone %d at LBA %llu is invalid: %llu + %llu\n",
+ zone_idx, lba, start_lba, zone_length);
+ ret = -EINVAL;
+ goto out;
+ }
+ lba = start_lba + zone_length;
+ if (sd_zbc_is_gap_zone(&buf[offset])) {
+ if (sdkp->zone_starting_lba_gran)
+ continue;
+ sd_printk(KERN_ERR, sdkp,
+ "Gap zone without constant LBA offsets\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx,
cb, data);
if (ret)
goto out;
+
zone_idx++;
}
-
- sector += sd_zbc_zone_sectors(sdkp) * i;
}
ret = zone_idx;
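
A standalone model of the descriptor validation added above: the first zone must contain the requested LBA, every later zone must start exactly where the previous one ended, and start + length must not wrap (synthetic descriptor values):

#include <stdio.h>

int main(void)
{
        unsigned long long lba = 1000;        /* requested start LBA */
        unsigned long long starts[] = { 0, 4096, 8192 };
        unsigned long long len = 4096;

        for (int i = 0; i < 3; i++) {
                unsigned long long s = starts[i];
                int bad = (i == 0 && (lba < s || lba >= s + len)) ||
                          (i > 0 && s != lba) ||
                          s + len < s;        /* wraparound check */
                if (bad) {
                        printf("zone %d at LBA %llu is invalid\n", i, s);
                        return 1;
                }
                lba = s + len;                /* next expected start */
        }
        puts("report is self-consistent");
        return 0;
}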
@@ -244,7 +325,7 @@ out:
static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t sector = blk_rq_pos(rq);
if (!sd_is_zoned(sdkp))
@@ -276,6 +357,10 @@ static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
return 0;
}
+/*
+ * An attempt to append to a zone triggered an invalid write pointer error.
+ * Reread the write pointer of the zone(s) in which the append failed.
+ */
static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
struct scsi_disk *sdkp;
@@ -286,14 +371,14 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
- for (zno = 0; zno < sdkp->nr_zones; zno++) {
+ for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) {
if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
continue;
spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
SD_BUF_SIZE,
- zno * sdkp->zone_blocks, true);
+ zno * sdkp->zone_info.zone_blocks, true);
spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
if (!ret)
sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
@@ -322,7 +407,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
unsigned int nr_blocks)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
unsigned int wp_offset, zno = blk_rq_zone_no(rq);
unsigned long flags;
blk_status_t ret;
@@ -360,7 +445,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
break;
default:
wp_offset = sectors_to_logical(sdkp->device, wp_offset);
- if (wp_offset + nr_blocks > sdkp->zone_blocks) {
+ if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) {
ret = BLK_STS_IOERR;
break;
}
@@ -388,7 +473,7 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
{
struct request *rq = scsi_cmd_to_rq(cmd);
sector_t sector = blk_rq_pos(rq);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t block = sectors_to_logical(sdkp->device, sector);
blk_status_t ret;
@@ -423,7 +508,6 @@ static bool sd_zbc_need_zone_wp_update(struct request *rq)
return true;
case REQ_OP_WRITE:
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
return blk_rq_zone_is_seq(rq);
default:
return false;
@@ -443,9 +527,9 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
{
int result = cmd->result;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
unsigned int zno = blk_rq_zone_no(rq);
- enum req_opf op = req_op(rq);
+ enum req_op op = req_op(rq);
unsigned long flags;
/*
@@ -477,7 +561,6 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
rq->__sector += sdkp->zones_wp_offset[zno];
fallthrough;
case REQ_OP_WRITE_ZEROES:
- case REQ_OP_WRITE_SAME:
case REQ_OP_WRITE:
if (sdkp->zones_wp_offset[zno] < sd_zbc_zone_sectors(sdkp))
sdkp->zones_wp_offset[zno] +=
@@ -491,7 +574,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
break;
case REQ_OP_ZONE_RESET_ALL:
memset(sdkp->zones_wp_offset, 0,
- sdkp->nr_zones * sizeof(unsigned int));
+ sdkp->zone_info.nr_zones * sizeof(unsigned int));
break;
default:
break;
@@ -547,6 +630,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
unsigned char *buf)
{
+ u64 zone_starting_lba_gran;
if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
sd_printk(KERN_NOTICE, sdkp,
@@ -560,12 +644,36 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
sdkp->zones_max_open = 0;
- } else {
- /* Host-managed */
- sdkp->urswrz = buf[4] & 1;
- sdkp->zones_optimal_open = 0;
- sdkp->zones_optimal_nonseq = 0;
- sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
+ return 0;
+ }
+
+ /* Host-managed */
+ sdkp->urswrz = buf[4] & 1;
+ sdkp->zones_optimal_open = 0;
+ sdkp->zones_optimal_nonseq = 0;
+ sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
+ /* Check zone alignment method */
+ switch (buf[23] & 0xf) {
+ case 0:
+ case ZBC_CONSTANT_ZONE_LENGTH:
+ /* Use zone length */
+ break;
+ case ZBC_CONSTANT_ZONE_START_OFFSET:
+ zone_starting_lba_gran = get_unaligned_be64(&buf[24]);
+ if (zone_starting_lba_gran == 0 ||
+ !is_power_of_2(zone_starting_lba_gran) ||
+ logical_to_sectors(sdkp->device, zone_starting_lba_gran) >
+ UINT_MAX) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid zone starting LBA granularity %llu\n",
+ zone_starting_lba_gran);
+ return -ENODEV;
+ }
+ sdkp->zone_starting_lba_gran = zone_starting_lba_gran;
+ break;
+ default:
+ sd_printk(KERN_ERR, sdkp, "Invalid zone alignment method\n");
+ return -ENODEV;
}
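
A standalone version of the three granularity checks above: the reported zone starting LBA granularity must be nonzero, a power of two, and small enough that its size in 512-byte sectors fits in an unsigned int (512 B logical blocks are assumed for the conversion here):

#include <stdio.h>

static int valid_gran(unsigned long long gran)
{
        unsigned long long sectors = gran;   /* 512 B blocks: 1 block = 1 sector */

        if (gran == 0)
                return 0;
        if (gran & (gran - 1))               /* not a power of two */
                return 0;
        if (sectors > 0xffffffffULL)         /* exceeds UINT_MAX sectors */
                return 0;
        return 1;
}

int main(void)
{
        printf("%d %d %d\n", valid_gran(0), valid_gran(524288), valid_gran(524289));
        return 0;                            /* prints: 0 1 0 */
}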
/*
@@ -587,7 +695,7 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
* sd_zbc_check_capacity - Check the device capacity
* @sdkp: Target disk
* @buf: command buffer
- * @zblocks: zone size in number of blocks
+ * @zblocks: zone size in logical blocks
*
* Get the device zone size and check that the device capacity as reported
* by READ CAPACITY matches the max_lba value (plus one) of the report zones
@@ -621,14 +729,25 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf,
}
}
- /* Get the size of the first reported zone */
- rec = buf + 64;
- zone_blocks = get_unaligned_be64(&rec[8]);
- if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "Zone size too large\n");
- return -EFBIG;
+ if (sdkp->zone_starting_lba_gran == 0) {
+ /* Get the size of the first reported zone */
+ rec = buf + 64;
+ zone_blocks = get_unaligned_be64(&rec[8]);
+ if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+ "Zone size too large\n");
+ return -EFBIG;
+ }
+ } else {
+ zone_blocks = sdkp->zone_starting_lba_gran;
+ }
+
+ if (!is_power_of_2(zone_blocks)) {
+ sd_printk(KERN_ERR, sdkp,
+ "Zone size %llu is not a power of two.\n",
+ zone_blocks);
+ return -EINVAL;
}
*zblocks = zone_blocks;
@@ -641,16 +760,16 @@ static void sd_zbc_print_zones(struct scsi_disk *sdkp)
if (!sd_is_zoned(sdkp) || !sdkp->capacity)
return;
- if (sdkp->capacity & (sdkp->zone_blocks - 1))
+ if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
sd_printk(KERN_NOTICE, sdkp,
"%u zones of %u logical blocks + 1 runt zone\n",
- sdkp->nr_zones - 1,
- sdkp->zone_blocks);
+ sdkp->zone_info.nr_zones - 1,
+ sdkp->zone_info.zone_blocks);
else
sd_printk(KERN_NOTICE, sdkp,
"%u zones of %u logical blocks\n",
- sdkp->nr_zones,
- sdkp->zone_blocks);
+ sdkp->zone_info.nr_zones,
+ sdkp->zone_info.zone_blocks);
}
static int sd_zbc_init_disk(struct scsi_disk *sdkp)
@@ -667,8 +786,11 @@ static int sd_zbc_init_disk(struct scsi_disk *sdkp)
return 0;
}
-static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
+void sd_zbc_free_zone_info(struct scsi_disk *sdkp)
{
+ if (!sdkp->zone_wp_update_buf)
+ return;
+
/* Serialize against revalidate zones */
mutex_lock(&sdkp->rev_mutex);
@@ -677,20 +799,12 @@ static void sd_zbc_clear_zone_info(struct scsi_disk *sdkp)
kfree(sdkp->zone_wp_update_buf);
sdkp->zone_wp_update_buf = NULL;
- sdkp->nr_zones = 0;
- sdkp->rev_nr_zones = 0;
- sdkp->zone_blocks = 0;
- sdkp->rev_zone_blocks = 0;
+ sdkp->early_zone_info = (struct zoned_disk_info){ };
+ sdkp->zone_info = (struct zoned_disk_info){ };
mutex_unlock(&sdkp->rev_mutex);
}
-void sd_zbc_release_disk(struct scsi_disk *sdkp)
-{
- if (sd_is_zoned(sdkp))
- sd_zbc_clear_zone_info(sdkp);
-}
-
static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
@@ -698,12 +812,17 @@ static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset);
}
+/*
+ * Call blk_revalidate_disk_zones() if any of the zoned disk properties have
+ * changed that make it necessary to call that function. Called by
+ * sd_revalidate_disk() after the gendisk capacity has been set.
+ */
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
{
struct gendisk *disk = sdkp->disk;
struct request_queue *q = disk->queue;
- u32 zone_blocks = sdkp->rev_zone_blocks;
- unsigned int nr_zones = sdkp->rev_nr_zones;
+ u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
+ unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
u32 max_append;
int ret = 0;
unsigned int flags;
@@ -734,14 +853,14 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
*/
mutex_lock(&sdkp->rev_mutex);
- if (sdkp->zone_blocks == zone_blocks &&
- sdkp->nr_zones == nr_zones &&
- disk->queue->nr_zones == nr_zones)
+ if (sdkp->zone_info.zone_blocks == zone_blocks &&
+ sdkp->zone_info.nr_zones == nr_zones &&
+ disk->nr_zones == nr_zones)
goto unlock;
flags = memalloc_noio_save();
- sdkp->zone_blocks = zone_blocks;
- sdkp->nr_zones = nr_zones;
+ sdkp->zone_info.zone_blocks = zone_blocks;
+ sdkp->zone_info.nr_zones = nr_zones;
sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
if (!sdkp->rev_wp_offset) {
ret = -ENOMEM;
@@ -756,8 +875,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
sdkp->rev_wp_offset = NULL;
if (ret) {
- sdkp->zone_blocks = 0;
- sdkp->nr_zones = 0;
+ sdkp->zone_info = (struct zoned_disk_info){ };
sdkp->capacity = 0;
goto unlock;
}
@@ -776,7 +894,16 @@ unlock:
return ret;
}
-int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
+/**
+ * sd_zbc_read_zones - Read zone information and update the request queue
+ * @sdkp: SCSI disk pointer.
+ * @buf: 512 byte buffer used for storing SCSI command output.
+ *
+ * Read zone information and update the request queue zone characteristics and
+ * also the zoned device information in *sdkp. Called by sd_revalidate_disk()
+ * before the gendisk capacity has been set.
+ */
+int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
{
struct gendisk *disk = sdkp->disk;
struct request_queue *q = disk->queue;
@@ -784,12 +911,15 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
u32 zone_blocks = 0;
int ret;
- if (!sd_is_zoned(sdkp))
+ if (!sd_is_zoned(sdkp)) {
/*
- * Device managed or normal SCSI disk,
- * no special handling required
+ * Device managed or normal SCSI disk, no special handling
+ * required. Nevertheless, free the disk zone information in
+ * case the device type changed.
*/
+ sd_zbc_free_zone_info(sdkp);
return 0;
+ }
/* READ16/WRITE16 is mandatory for ZBC disks */
sdkp->device->use_16_for_rw = 1;
@@ -798,11 +928,11 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
if (!blk_queue_is_zoned(q)) {
/*
* This can happen for a host aware disk with partitions.
- * The block device zone information was already cleared
- * by blk_queue_set_zoned(). Only clear the scsi disk zone
+ * The block device zone model was already cleared by
+ * disk_set_zoned(). Only free the scsi disk zone
* information and exit early.
*/
- sd_zbc_clear_zone_info(sdkp);
+ sd_zbc_free_zone_info(sdkp);
return 0;
}
@@ -820,10 +950,10 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
if (sdkp->zones_max_open == U32_MAX)
- blk_queue_max_open_zones(q, 0);
+ disk_set_max_open_zones(disk, 0);
else
- blk_queue_max_open_zones(q, sdkp->zones_max_open);
- blk_queue_max_active_zones(q, 0);
+ disk_set_max_open_zones(disk, sdkp->zones_max_open);
+ disk_set_max_active_zones(disk, 0);
nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
/*
@@ -834,8 +964,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
- sdkp->rev_nr_zones = nr_zones;
- sdkp->rev_zone_blocks = zone_blocks;
+ sdkp->early_zone_info.nr_zones = nr_zones;
+ sdkp->early_zone_info.zone_blocks = zone_blocks;
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 141099ab9092..ce34a8ad53b4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -49,11 +49,15 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/uio.h>
#include <linux/cred.h> /* for sg_check_file_access() */
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/sg.h>
#include "scsi_logging.h"
@@ -77,7 +81,7 @@ static int sg_proc_init(void);
#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
-int sg_big_buff = SG_DEF_RESERVED_SIZE;
+static int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
of this size (or less if there is not enough memory) will be reserved
@@ -173,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
} Sg_device;
/* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, blk_status_t status);
+static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -191,7 +195,7 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static Sg_device *sg_get_dev(int dev);
@@ -224,11 +228,6 @@ static int sg_check_file_access(struct file *filp, const char *caller)
caller, task_tgid_vnr(current), current->comm);
return -EPERM;
}
- if (uaccess_kernel()) {
- pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
- caller, task_tgid_vnr(current), current->comm);
- return -EACCES;
- }
return 0;
}
@@ -445,6 +444,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
Sg_fd *sfp;
Sg_request *srp;
int req_pack_id = -1;
+ bool busy;
sg_io_hdr_t *hp;
struct sg_header *old_hdr;
int retval;
@@ -467,20 +467,16 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
if (retval)
return retval;
- srp = sg_get_rq_mark(sfp, req_pack_id);
+ srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
if (!srp) { /* now wait on packet to arrive */
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
retval = wait_event_interruptible(sfp->read_wait,
- (atomic_read(&sdp->detaching) ||
- (srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
- if (retval)
- /* -ERESTARTSYS as signal hit process */
- return retval;
+ ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+ (!busy && atomic_read(&sdp->detaching))));
+ if (!srp)
+ /* signal or detaching */
+ return retval ? retval : -ENODEV;
}
if (srp->header.interface_id != '\0')
return sg_new_read(sfp, buf, count, srp);
@@ -814,7 +810,6 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
}
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
- scsi_req_free_cmd(scsi_req(srp->rq));
blk_mq_free_request(srp->rq);
srp->rq = NULL;
}
@@ -833,7 +828,8 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
- blk_execute_rq_nowait(NULL, srp->rq, at_head, sg_rq_end_io);
+ srp->rq->end_io = sg_rq_end_io;
+ blk_execute_rq_nowait(srp->rq, at_head);
return 0;
}
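
The hunk above reflects the blk_execute_rq_nowait() API change: the completion callback now travels on the request itself and the queue argument is gone. A hedged sketch of the new wiring (my_ctx and my_end_io are hypothetical):

    rq->end_io_data = my_ctx;   /* driver context for the callback */
    rq->end_io = my_end_io;     /* must return enum rq_end_io_ret */
    blk_execute_rq_nowait(rq, true /* at_head */);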
@@ -941,9 +937,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
- if (atomic_read(&sdp->detaching))
- return -ENODEV;
+ srp_done(sfp, srp));
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
srp->done = 2;
@@ -1109,7 +1103,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@@ -1165,7 +1159,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
if (ret != -ENOIOCTLCMD)
return ret;
- return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
}
static __poll_t
@@ -1317,11 +1311,11 @@ sg_rq_end_io_usercontext(struct work_struct *work)
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
-static void
+static enum rq_end_io_ret
sg_rq_end_io(struct request *rq, blk_status_t status)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
struct sg_request *srp = rq->end_io_data;
- struct scsi_request *req = scsi_req(rq);
Sg_device *sdp;
Sg_fd *sfp;
unsigned long iflags;
@@ -1330,19 +1324,19 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
int result, resid, done = 1;
if (WARN_ON(srp->done != 0))
- return;
+ return RQ_END_IO_NONE;
sfp = srp->parentfp;
if (WARN_ON(sfp == NULL))
- return;
+ return RQ_END_IO_NONE;
sdp = sfp->parentdp;
if (unlikely(atomic_read(&sdp->detaching)))
pr_info("%s: device detaching\n", __func__);
- sense = req->sense;
- result = req->result;
- resid = req->resid_len;
+ sense = scmd->sense_buffer;
+ result = scmd->result;
+ resid = scmd->resid_len;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_cmd_done: pack_id=%d, res=0x%x\n",
@@ -1377,8 +1371,8 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
}
}
- if (req->sense_len)
- memcpy(srp->sense_b, req->sense, SCSI_SENSE_BUFFERSIZE);
+ if (scmd->sense_len)
+ memcpy(srp->sense_b, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
/* Rely on write phase to clean out srp status values, so no "else" */
@@ -1389,7 +1383,6 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
* blk_rq_unmap_user() can be called from user context.
*/
srp->rq = NULL;
- scsi_req_free_cmd(scsi_req(rq));
blk_mq_free_request(rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
@@ -1413,6 +1406,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
schedule_work(&srp->ew.work);
}
+ return RQ_END_IO_NONE;
}
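
For reference, the new ->end_io contract: the callback returns an enum rq_end_io_ret telling the block layer whether it still owns the request. A self-contained sketch (my_end_io and the completion context are hypothetical):

    /*
     * RQ_END_IO_NONE: the callback disposes of the request itself, as
     * sg does above via blk_mq_free_request(). RQ_END_IO_FREE: the
     * block layer frees the request after the callback returns.
     */
    static enum rq_end_io_ret my_end_io(struct request *rq,
                                        blk_status_t status)
    {
            struct completion *done = rq->end_io_data; /* hypothetical */

            complete(done);
            return RQ_END_IO_NONE;
    }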
static const struct file_operations sg_fops = {
@@ -1634,6 +1628,37 @@ MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+
+static struct ctl_table sg_sysctls[] = {
+ {
+ .procname = "sg-big-buff",
+ .data = &sg_big_buff,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static struct ctl_table_header *hdr;
+static void register_sg_sysctls(void)
+{
+ if (!hdr)
+ hdr = register_sysctl("kernel", sg_sysctls);
+}
+
+static void unregister_sg_sysctls(void)
+{
+ if (hdr)
+ unregister_sysctl_table(hdr);
+}
+#else
+#define register_sg_sysctls() do { } while (0)
+#define unregister_sg_sysctls() do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
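+
With the table registered under "kernel", the value stays visible at /proc/sys/kernel/sg-big-buff (mode 0444, so read-only). A hedged userspace sketch reading it:

    #include <stdio.h>

    int main(void)
    {
            int val;
            FILE *f = fopen("/proc/sys/kernel/sg-big-buff", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &val) == 1)
                    printf("sg-big-buff = %d\n", val);
            fclose(f);
            return 0;
    }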
static int __init
init_sg(void)
{
@@ -1666,6 +1691,7 @@ init_sg(void)
return 0;
}
class_destroy(sg_sysfs_class);
+ register_sg_sysctls();
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
return rc;
@@ -1674,6 +1700,7 @@ err_out:
static void __exit
exit_sg(void)
{
+ unregister_sg_sysctls();
#ifdef CONFIG_SCSI_PROC_FS
remove_proc_subtree("scsi/sg", NULL);
#endif /* CONFIG_SCSI_PROC_FS */
@@ -1690,7 +1717,6 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
- struct scsi_request *req;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
@@ -1701,18 +1727,12 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
- unsigned char *long_cmdp = NULL;
+ struct scsi_cmnd *scmd;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
- if (hp->cmd_len > BLK_MAX_CDB) {
- long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
- if (!long_cmdp)
- return -ENOMEM;
- }
-
/*
* NOTE
*
@@ -1726,20 +1746,21 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
*/
rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq)) {
- kfree(long_cmdp);
+ if (IS_ERR(rq))
return PTR_ERR(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
+
+ if (hp->cmd_len > sizeof(scmd->cmnd)) {
+ blk_mq_free_request(rq);
+ return -EINVAL;
}
- req = scsi_req(rq);
- if (hp->cmd_len > BLK_MAX_CDB)
- req->cmd = long_cmdp;
- memcpy(req->cmd, cmd, hp->cmd_len);
- req->cmd_len = hp->cmd_len;
+ memcpy(scmd->cmnd, cmd, hp->cmd_len);
+ scmd->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
- req->retries = SG_DEFAULT_RETRIES;
+ scmd->allowed = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
@@ -1783,26 +1804,8 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md->from_user = 0;
}
- if (iov_count) {
- struct iovec *iov = NULL;
- struct iov_iter i;
-
- res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
- if (res < 0)
- return res;
-
- iov_iter_truncate(&i, hp->dxfer_len);
- if (!iov_iter_count(&i)) {
- kfree(iov);
- return -EINVAL;
- }
-
- res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
- kfree(iov);
- } else
- res = blk_rq_map_user(q, rq, md, hp->dxferp,
- hp->dxfer_len, GFP_ATOMIC);
-
+ res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len,
+ GFP_ATOMIC, iov_count, iov_count, 1, rw);
if (!res) {
srp->bio = rq->bio;
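
The single blk_rq_map_user_io() call above replaces both the iovec path (import_iovec() plus blk_rq_map_user_iov()) and the flat-buffer path (blk_rq_map_user()). An argument-by-argument reading of the call, as a sketch of the helper's signature as merged alongside this series:

    /*
     * blk_rq_map_user_io(rq, md,        request + optional bounce map data
     *                    hp->dxferp,    user buffer, or iovec array when vec
     *                    hp->dxfer_len, total length (iterator is truncated)
     *                    GFP_ATOMIC,    allocation flags
     *                    iov_count,     non-zero => treat dxferp as iovecs
     *                    iov_count,     number of iovec entries
     *                    1,             reject an empty iterator (-EINVAL)
     *                    rw);           READ or WRITE
     */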
@@ -1828,10 +1831,8 @@ sg_finish_rem_req(Sg_request *srp)
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
- if (srp->rq) {
- scsi_req_free_cmd(scsi_req(srp->rq));
+ if (srp->rq)
blk_mq_free_request(srp->rq);
- }
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
@@ -2056,19 +2057,28 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
}
static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
{
Sg_request *resp;
unsigned long iflags;
+ *busy = false;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
list_for_each_entry(resp, &sfp->rq_list, entry) {
- /* look for requests that are ready + not SG_IO owned */
- if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ /* look for requests that are not SG_IO owned */
+ if ((!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
- resp->done = 2; /* guard against other readers */
- write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ switch (resp->done) {
+ case 0: /* request active */
+ *busy = true;
+ break;
+ case 1: /* request done; response ready to return */
+ resp->done = 2; /* guard against other readers */
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return resp;
+ case 2: /* response already being returned */
+ break;
+ }
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
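
A sketch of the three srp->done states the switch above distinguishes (the enum names are hypothetical; the driver uses bare integers):

    enum sg_req_state {
            SG_RQ_ACTIVE  = 0, /* in flight: sets *busy so readers keep waiting */
            SG_RQ_DONE    = 1, /* response ready: first reader claims it (-> 2) */
            SG_RQ_CLAIMED = 2, /* already being returned by another reader */
    };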
@@ -2122,6 +2132,15 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
res = 1;
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+ /*
+	 * If the device is detaching, wake up any readers in case we just
+ * removed the last response, which would leave nothing for them to
+ * return other than -ENODEV.
+ */
+ if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+ wake_up_interruptible_all(&sfp->read_wait);
+
return res;
}
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index cf1030c9dda1..57d5dff62f63 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -28,7 +28,11 @@
#include <asm/sgi/ip22.h>
#include <asm/sgi/wd.h>
-#include "scsi.h"
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include "wd33c93.h"
struct ip22_hostdata {
@@ -65,14 +69,15 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
static inline
void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
{
- unsigned long len = cmd->SCp.this_residual;
- void *addr = cmd->SCp.ptr;
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+ unsigned long len = scsi_pointer->this_residual;
+ void *addr = scsi_pointer->ptr;
dma_addr_t physaddr;
unsigned long count;
struct hpc_chunk *hcp;
physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
- cmd->SCp.dma_handle = physaddr;
+ scsi_pointer->dma_handle = physaddr;
hcp = hd->cpu;
while (len) {
@@ -102,6 +107,7 @@ void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
static int dma_setup(struct scsi_cmnd *cmd, int datainp)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host);
struct hpc3_scsiregs *hregs =
(struct hpc3_scsiregs *) cmd->device->host->base;
@@ -116,7 +122,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
* obvious). IMHO a better fix would be, not to do these dma setups
* in the first place.
*/
- if (cmd->SCp.ptr == NULL || cmd->SCp.this_residual == 0)
+ if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0)
return 1;
fill_hpc_entries(hdata, cmd, datainp);
@@ -136,13 +142,14 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(SCpnt);
struct ip22_hostdata *hdata = host_to_hostdata(instance);
struct hpc3_scsiregs *hregs;
if (!SCpnt)
return;
- if (SCpnt->SCp.ptr == NULL || SCpnt->SCp.this_residual == 0)
+ if (scsi_pointer->ptr == NULL || scsi_pointer->this_residual == 0)
return;
hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base;
@@ -156,8 +163,8 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
barrier();
}
hregs->ctrl = 0;
- dma_unmap_single(hdata->dev, SCpnt->SCp.dma_handle,
- SCpnt->SCp.this_residual,
+ dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+ scsi_pointer->this_residual,
DMA_DIR(hdata->wh.dma_dir));
pr_debug("\n");
@@ -209,6 +216,7 @@ static struct scsi_host_template sgiwd93_template = {
.sg_tablesize = SG_ALL,
.cmd_per_lun = 8,
.dma_boundary = PAGE_SIZE - 1,
+ .cmd_size = sizeof(struct scsi_pointer),
};
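
The SCp conversion above follows the tree-wide pattern of moving struct scsi_pointer out of struct scsi_cmnd into per-command private data: .cmd_size tells the midlayer how many extra bytes to allocate with each command, and WD33C93_scsi_pointer() in wd33c93.h presumably just wraps the generic accessor. A sketch of that pattern:

    static inline struct scsi_pointer *example_scsi_pointer(struct scsi_cmnd *cmd)
    {
            /* points at the .cmd_size bytes allocated after the command */
            return scsi_cmd_priv(cmd);
    }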
static int sgiwd93_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 6f83e2df4d64..973d240649ab 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index aac88ac0a0b7..e550b12e525a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -96,12 +96,6 @@ struct pqi_ctrl_registers {
struct pqi_device_registers pqi_registers; /* 4000h */
};
-#if ((HZ) < 1000)
-#define PQI_HZ 1000
-#else
-#define PQI_HZ (HZ)
-#endif
-
#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
/* shutdown reasons for taking the controller offline */
@@ -299,7 +293,8 @@ struct pqi_raid_path_request {
u8 additional_cdb_bytes_usage : 3;
u8 reserved5 : 3;
u8 cdb[16];
- u8 reserved6[12];
+ u8 reserved6[11];
+ u8 ml_device_lun_number;
__le32 timeout;
struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
@@ -473,7 +468,8 @@ struct pqi_task_management_request {
struct pqi_iu_header header;
__le16 request_id;
__le16 nexus_id;
- u8 reserved[2];
+ u8 reserved;
+ u8 ml_device_lun_number;
__le16 timeout;
u8 lun_number[8];
__le16 protocol_specific;
@@ -714,6 +710,7 @@ typedef u32 pqi_index_t;
#define SOP_TMF_COMPLETE 0x0
#define SOP_TMF_REJECTED 0x4
#define SOP_TMF_FUNCTION_SUCCEEDED 0x8
+#define SOP_RC_INCORRECT_LOGICAL_UNIT 0x9
/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0 0 /* 16-byte CDB */
@@ -869,7 +866,8 @@ struct pqi_config_table_firmware_features {
#define PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN 16
#define PQI_FIRMWARE_FEATURE_FW_TRIAGE 17
#define PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5 18
-#define PQI_FIRMWARE_FEATURE_MAXIMUM 18
+#define PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT 21
+#define PQI_FIRMWARE_FEATURE_MAXIMUM 21
struct pqi_config_table_debug {
struct pqi_config_table_section_header header;
@@ -924,7 +922,8 @@ union pqi_reset_register {
#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
-#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_MAX_ENTRIES 1024
+#define RAID_MAP_MAX_DATA_DISKS_PER_ROW 128
#define PQI_PHYSICAL_DEVICE_BUS 0
#define PQI_RAID_VOLUME_BUS 1
@@ -1086,8 +1085,10 @@ struct pqi_stream_data {
u32 last_accessed;
};
+#define PQI_MAX_LUNS_PER_DEVICE 256
+
struct pqi_scsi_dev {
- int devtype; /* as reported by INQUIRY commmand */
+ int devtype; /* as reported by INQUIRY command */
u8 device_type; /* as reported by */
/* BMIC_IDENTIFY_PHYSICAL_DEVICE */
/* only valid for devtype = TYPE_DISK */
@@ -1127,9 +1128,12 @@ struct pqi_scsi_dev {
u8 box[8];
u16 phys_connector[8];
u8 phy_id;
+ u8 ncq_prio_enable;
+ u8 ncq_prio_support;
+ u8 multi_lun_device_lun_count;
bool raid_bypass_configured; /* RAID bypass configured */
bool raid_bypass_enabled; /* RAID bypass enabled */
- u32 next_bypass_group;
+ u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW];
struct raid_map *raid_map; /* RAID bypass map */
u32 max_transfer_encrypted;
@@ -1142,9 +1146,8 @@ struct pqi_scsi_dev {
struct list_head delete_list_entry;
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
- atomic_t scsi_cmds_outstanding;
+ atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
atomic_t raid_bypass_cnt;
- u8 page_83_identifier[16];
};
/* VPD inquiry pages */
@@ -1266,6 +1269,12 @@ struct pqi_event {
#define PQI_CTRL_PRODUCT_REVISION_A 0
#define PQI_CTRL_PRODUCT_REVISION_B 1
+enum pqi_ctrl_removal_state {
+ PQI_CTRL_PRESENT = 0,
+ PQI_CTRL_GRACEFUL_REMOVAL,
+ PQI_CTRL_SURPRISE_REMOVAL
+};
+
struct pqi_ctrl_info {
unsigned int ctrl_id;
struct pci_dev *pci_dev;
@@ -1326,6 +1335,7 @@ struct pqi_ctrl_info {
bool controller_online;
bool block_requests;
bool scan_blocked;
+ u8 logical_volume_rescan_needed : 1;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
@@ -1333,15 +1343,15 @@ struct pqi_ctrl_info {
u8 soft_reset_handshake_supported : 1;
u8 raid_iu_timeout_supported : 1;
u8 tmf_iu_timeout_supported : 1;
- u8 unique_wwid_in_report_phys_lun_supported : 1;
u8 firmware_triage_supported : 1;
u8 rpl_extended_format_4_5_supported : 1;
+ u8 multi_lun_device_supported : 1;
u8 enable_r1_writes : 1;
u8 enable_r5_writes : 1;
u8 enable_r6_writes : 1;
u8 lv_drive_type_mix_valid : 1;
u8 enable_stream_detection : 1;
-
+ u8 disable_managed_interrupts : 1;
u8 ciss_report_log_flags;
u32 max_transfer_encrypted_sas_sata;
u32 max_transfer_encrypted_nvme;
@@ -1385,6 +1395,7 @@ struct pqi_ctrl_info {
struct work_struct ofa_quiesce_work;
u32 ofa_bytes_requested;
u16 ofa_cancel_reason;
+ enum pqi_ctrl_removal_state ctrl_removal_state;
};
enum pqi_ctrl_mode {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index f0897d587454..b971fbe3b3a1 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.12-055"
+#define DRIVER_VERSION "2.1.18-045"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 12
-#define DRIVER_REVISION 55
+#define DRIVER_RELEASE 18
+#define DRIVER_REVISION 45
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -54,6 +54,16 @@ MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
+struct pqi_cmd_priv {
+ int this_residual;
+};
+
+static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
+static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
@@ -68,7 +78,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info, bool raid_bypass);
+ struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
@@ -84,7 +94,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_msecs);
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
+static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -164,6 +175,18 @@ module_param_named(hide_vsep,
pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
+static int pqi_disable_managed_interrupts;
+module_param_named(disable_managed_interrupts,
+ pqi_disable_managed_interrupts, int, 0644);
+MODULE_PARM_DESC(disable_managed_interrupts,
+ "Disable the kernel automatically assigning SMP affinity to IRQs.");
+
+static unsigned int pqi_ctrl_ready_timeout_secs;
+module_param_named(ctrl_ready_timeout,
+ pqi_ctrl_ready_timeout_secs, uint, 0644);
+MODULE_PARM_DESC(ctrl_ready_timeout,
+ "Timeout in seconds for driver to wait for controller ready.");
+
static char *raid_levels[] = {
"RAID-0",
"RAID-4",
@@ -376,7 +399,7 @@ static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
displayed_warning = false;
start_jiffies = jiffies;
- warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
while (atomic_read(&ctrl_info->num_busy_threads) >
atomic_read(&ctrl_info->num_blocked_threads)) {
@@ -385,7 +408,7 @@ static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
"waiting %u seconds for driver activity to quiesce\n",
jiffies_to_msecs(jiffies - start_jiffies) / 1000);
displayed_warning = true;
- warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
}
@@ -462,7 +485,7 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}
-#define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
+#define PQI_RESCAN_WORK_DELAY (10 * HZ)
static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
@@ -1038,7 +1061,7 @@ static int pqi_write_current_time_to_host_wellness(
return rc;
}
-#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
static void pqi_update_time_worker(struct work_struct *work)
{
@@ -1181,8 +1204,8 @@ static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **b
for (i = 0; i < num_physicals; i++) {
memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
- memset(&rpl_16byte_wwid_list->lun_entries[i].wwid, 0, 8);
- memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
+ memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
+ memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
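
Net effect of the reordered memcpy/memset above, sketched as the resulting 16-byte layout: bytes 0..7 now carry the legacy 8-byte WWID and bytes 8..15 are zeroed (previously the reverse). This is what lets a later hunk derive the SAS address uniformly:

    /* bytes 0..7 : 8-byte WWID from the legacy RPL entry */
    /* bytes 8..15: zero padding                          */
    device->sas_address = get_unaligned_be64(&device->wwid[0]);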
@@ -1549,6 +1572,7 @@ no_buffer:
device->volume_offline = volume_offline;
}
+#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
@@ -1586,10 +1610,9 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
&id_phys->alternate_paths_phys_connector,
sizeof(device->phys_connector));
device->bay = id_phys->phys_bay_in_box;
-
- memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
- sizeof(device->page_83_identifier));
-
+ device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
+ if (!device->multi_lun_device_lun_count)
+ device->multi_lun_device_lun_count = 1;
if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
id_phys->phy_count)
device->phy_id =
@@ -1597,6 +1620,10 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
else
device->phy_id = 0xFF;
+ device->ncq_prio_support =
+ ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
+ PQI_DEVICE_NCQ_PRIO_SUPPORTED);
+
return 0;
}
@@ -1868,15 +1895,18 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
int rc;
+ int lun;
- rc = pqi_device_wait_for_pending_io(ctrl_info, device,
- PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
- if (rc)
- dev_err(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun,
- atomic_read(&device->scsi_cmds_outstanding));
+ for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
+ PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
+ if (rc)
+ dev_err(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
+ ctrl_info->scsi_host->host_no, device->bus,
+ device->target, lun,
+ atomic_read(&device->scsi_cmds_outstanding[lun]));
+ }
if (pqi_is_logical_device(device))
scsi_remove_device(device->sdev);
@@ -2008,10 +2038,27 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
+static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
+{
+ u32 raid_map1_size;
+ u32 raid_map2_size;
+
+ if (raid_map1 == NULL || raid_map2 == NULL)
+ return raid_map1 == raid_map2;
+
+ raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
+ raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
+
+ if (raid_map1_size != raid_map2_size)
+ return false;
+
+ return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
+}
+
/* Assumes the SCSI device list lock is held. */
-static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
- struct pqi_scsi_dev *new_device)
+static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
existing_device->device_type = new_device->device_type;
existing_device->bus = new_device->bus;
@@ -2021,50 +2068,51 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
existing_device->target_lun_valid = true;
}
- if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
- existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
- new_device->volume_status == CISS_LV_OK)
- existing_device->rescan = true;
-
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
- existing_device->is_external_raid_device =
- new_device->is_external_raid_device;
- existing_device->is_expander_smp_device =
- new_device->is_expander_smp_device;
- existing_device->aio_enabled = new_device->aio_enabled;
- memcpy(existing_device->vendor, new_device->vendor,
- sizeof(existing_device->vendor));
- memcpy(existing_device->model, new_device->model,
- sizeof(existing_device->model));
+ memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
+ memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
existing_device->sas_address = new_device->sas_address;
- existing_device->raid_level = new_device->raid_level;
existing_device->queue_depth = new_device->queue_depth;
- existing_device->aio_handle = new_device->aio_handle;
- existing_device->volume_status = new_device->volume_status;
- existing_device->active_path_index = new_device->active_path_index;
- existing_device->phy_id = new_device->phy_id;
- existing_device->path_map = new_device->path_map;
- existing_device->bay = new_device->bay;
- existing_device->box_index = new_device->box_index;
- existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
- existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
- memcpy(existing_device->box, new_device->box,
- sizeof(existing_device->box));
- memcpy(existing_device->phys_connector, new_device->phys_connector,
- sizeof(existing_device->phys_connector));
- existing_device->next_bypass_group = 0;
- kfree(existing_device->raid_map);
- existing_device->raid_map = new_device->raid_map;
- existing_device->raid_bypass_configured =
- new_device->raid_bypass_configured;
- existing_device->raid_bypass_enabled =
- new_device->raid_bypass_enabled;
existing_device->device_offline = false;
- /* To prevent this from being freed later. */
- new_device->raid_map = NULL;
+ if (pqi_is_logical_device(existing_device)) {
+ existing_device->is_external_raid_device = new_device->is_external_raid_device;
+
+ if (existing_device->devtype == TYPE_DISK) {
+ existing_device->raid_level = new_device->raid_level;
+ existing_device->volume_status = new_device->volume_status;
+ if (ctrl_info->logical_volume_rescan_needed)
+ existing_device->rescan = true;
+ memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
+ if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
+ kfree(existing_device->raid_map);
+ existing_device->raid_map = new_device->raid_map;
+ /* To prevent this from being freed later. */
+ new_device->raid_map = NULL;
+ }
+ existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
+ existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
+ }
+ } else {
+ existing_device->aio_enabled = new_device->aio_enabled;
+ existing_device->aio_handle = new_device->aio_handle;
+ existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
+ existing_device->active_path_index = new_device->active_path_index;
+ existing_device->phy_id = new_device->phy_id;
+ existing_device->path_map = new_device->path_map;
+ existing_device->bay = new_device->bay;
+ existing_device->box_index = new_device->box_index;
+ existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
+ existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
+ memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
+ memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
+
+ existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
+ if (existing_device->multi_lun_device_lun_count == 0)
+ existing_device->multi_lun_device_lun_count = 1;
+ }
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
@@ -2141,7 +2189,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
*/
device->new_device = false;
matching_device->device_gone = false;
- pqi_scsi_update_device(matching_device, device);
+ pqi_scsi_update_device(ctrl_info, matching_device, device);
break;
case DEVICE_NOT_FOUND:
/*
@@ -2213,8 +2261,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
/*
- * Notify the SCSI ML if the queue depth of any existing device has
- * changed.
+	 * Notify the SML of any existing device changes, such as
+	 * queue depth or device size.
*/
list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
@@ -2243,6 +2291,9 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
}
}
+
+ ctrl_info->logical_volume_rescan_needed = false;
+
}
static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
@@ -2274,16 +2325,12 @@ static inline void pqi_mask_device(u8 *scsi3addr)
scsi3addr[3] |= 0xc0;
}
-static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
{
- switch (device->device_type) {
- case SA_DEVICE_TYPE_SAS:
- case SA_DEVICE_TYPE_EXPANDER_SMP:
- case SA_DEVICE_TYPE_SES:
- return true;
- }
+ if (pqi_is_logical_device(device))
+ return false;
- return false;
+ return (device->path_map & (device->path_map - 1)) != 0;
}
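
The return expression above is the standard "more than one bit set" test: x & (x - 1) clears the lowest set bit, so the result is non-zero exactly when path_map reports at least two paths. Worked example:

    /* path_map = 0b0100: 0b0100 & 0b0011 = 0      -> single path */
    /* path_map = 0b0101: 0b0101 & 0b0100 = 0b0100 -> multipath   */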
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
@@ -2291,17 +2338,6 @@ static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
}
-static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct report_phys_lun_16byte_wwid *phys_lun)
-{
- if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
- ctrl_info->rpl_extended_format_4_5_supported ||
- pqi_is_device_with_sas_address(device))
- memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
- else
- memcpy(&device->wwid[8], device->page_83_identifier, 8);
-}
-
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int i;
@@ -2469,7 +2505,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
- pqi_set_physical_device_wwid(ctrl_info, device, phys_lun);
+ memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
if ((phys_lun->device_flags &
CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
phys_lun->aio_handle) {
@@ -2482,8 +2518,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
sizeof(device->volume_id));
}
- if (pqi_is_device_with_sas_address(device))
- device->sas_address = get_unaligned_be64(&device->wwid[8]);
+ device->sas_address = get_unaligned_be64(&device->wwid[0]);
new_device_list[num_valid_devices++] = device;
}
@@ -2507,25 +2542,6 @@ out:
return rc;
}
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_scsi_dev *device;
- struct pqi_scsi_dev *next;
-
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-
- list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
- list_del(&device->scsi_device_list_entry);
- pqi_free_device(device);
- }
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
-}
-
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -2950,11 +2966,11 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
if (rmd.is_write) {
pqi_calc_aio_r1_nexus(raid_map, &rmd);
} else {
- group = device->next_bypass_group;
+ group = device->next_bypass_group[rmd.map_index];
next_bypass_group = group + 1;
if (next_bypass_group >= rmd.layout_map_count)
next_bypass_group = 0;
- device->next_bypass_group = next_bypass_group;
+ device->next_bypass_group[rmd.map_index] = next_bypass_group;
rmd.map_index += group * rmd.data_disks_per_row;
}
} else if ((device->raid_level == SA_RAID_5 ||
@@ -3009,7 +3025,7 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
rmd.cdb, rmd.cdb_length, queue_group,
- encryption_info_ptr, true);
+ encryption_info_ptr, true, false);
}
#define PQI_STATUS_IDLE 0x0
@@ -3034,7 +3050,7 @@ static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
u8 status;
pqi_registers = ctrl_info->pqi_registers;
- timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
signature = readq(&pqi_registers->signature);
@@ -3213,12 +3229,14 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
int residual_count;
int xfer_count;
bool device_offline;
+ struct pqi_scsi_dev *device;
scmd = io_request->scmd;
error_info = io_request->error_info;
host_byte = DID_OK;
sense_data_length = 0;
device_offline = false;
+ device = scmd->device->hostdata;
switch (error_info->service_response) {
case PQI_AIO_SERV_RESPONSE_COMPLETE:
@@ -3243,8 +3261,14 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
break;
case PQI_AIO_STATUS_AIO_PATH_DISABLED:
pqi_aio_path_disabled(io_request);
- scsi_status = SAM_STAT_GOOD;
- io_request->status = -EAGAIN;
+ if (pqi_is_multipath_device(device)) {
+ pqi_device_remove_start(device);
+ host_byte = DID_NO_CONNECT;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ } else {
+ scsi_status = SAM_STAT_GOOD;
+ io_request->status = -EAGAIN;
+ }
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
@@ -3318,6 +3342,9 @@ static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_inf
case SOP_TMF_REJECTED:
rc = -EAGAIN;
break;
+ case SOP_RC_INCORRECT_LOGICAL_UNIT:
+ rc = -ENODEV;
+ break;
default:
rc = -EIO;
break;
@@ -3520,7 +3547,7 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
u8 status;
unsigned long timeout;
- timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
status = pqi_read_soft_reset_status(ctrl_info);
@@ -3659,6 +3686,20 @@ static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
return ack_event;
}
+static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->raid_bypass_enabled)
+ device->raid_bypass_enabled = false;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
@@ -3684,6 +3725,10 @@ static void pqi_event_worker(struct work_struct *work)
} else {
ack_event = true;
rescan_needed = true;
+ if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
+ ctrl_info->logical_volume_rescan_needed = true;
+ else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
+ pqi_disable_raid_bypass(ctrl_info);
}
if (ack_event)
pqi_acknowledge_event(ctrl_info, event);
@@ -3691,14 +3736,17 @@ static void pqi_event_worker(struct work_struct *work)
event++;
}
+#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
+
if (rescan_needed)
- pqi_schedule_rescan_worker_delayed(ctrl_info);
+ pqi_schedule_rescan_worker_with_delay(ctrl_info,
+ PQI_RESCAN_WORK_FOR_EVENT_DELAY);
out:
pqi_ctrl_unbusy(ctrl_info);
}
-#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
+#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
@@ -3986,10 +4034,14 @@ static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
int num_vectors_enabled;
+ unsigned int flags = PCI_IRQ_MSIX;
+
+ if (!pqi_disable_managed_interrupts)
+ flags |= PCI_IRQ_AFFINITY;
num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+ flags);
if (num_vectors_enabled < 0) {
dev_err(&ctrl_info->pci_dev->dev,
"MSI-X init failed with error %d\n",
@@ -4245,7 +4297,7 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
@@ -4339,7 +4391,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
admin_queues = &ctrl_info->admin_queues;
oq_ci = admin_queues->oq_ci_copy;
- timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
oq_pi = readl(admin_queues->oq_pi);
@@ -4454,7 +4506,7 @@ static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
+ PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
rc = 0;
break;
}
@@ -5451,6 +5503,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
+ request->ml_device_lun_number = (u8)scmd->device->lun;
cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
memcpy(request->cdb, scmd->cmnd, cdb_length);
@@ -5478,10 +5531,10 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
}
switch (scmd->sc_data_direction) {
- case DMA_TO_DEVICE:
+ case DMA_FROM_DEVICE:
request->data_direction = SOP_READ_FLAG;
break;
- case DMA_FROM_DEVICE:
+ case DMA_TO_DEVICE:
request->data_direction = SOP_WRITE_FLAG;
break;
case DMA_NONE:
@@ -5555,44 +5608,86 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
scsi_dma_unmap(scmd);
if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
set_host_byte(scmd, DID_IMM_RETRY);
- scmd->SCp.this_residual++;
+ pqi_cmd_priv(scmd)->this_residual++;
}
pqi_free_io_request(io_request);
pqi_scsi_done(scmd);
}
+static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
+{
+ bool io_high_prio;
+ int priority_class;
+
+ io_high_prio = false;
+
+ if (device->ncq_prio_enable) {
+ priority_class =
+ IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
+ if (priority_class == IOPRIO_CLASS_RT) {
+ /* Set NCQ priority for read/write commands. */
+ switch (scmd->cmnd[0]) {
+ case WRITE_16:
+ case READ_16:
+ case WRITE_12:
+ case READ_12:
+ case WRITE_10:
+ case READ_10:
+ case WRITE_6:
+ case READ_6:
+ io_high_prio = true;
+ break;
+ }
+ }
+ }
+
+ return io_high_prio;
+}
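
Only requests whose I/O priority class is IOPRIO_CLASS_RT qualify above, so the feature is driven from userspace through the usual ioprio interface. A hedged sketch of tagging a process's I/O as RT class (constants per include/uapi/linux/ioprio.h):

    #include <sys/syscall.h>
    #include <unistd.h>

    #define IOPRIO_CLASS_SHIFT 13
    #define IOPRIO_CLASS_RT    1
    #define IOPRIO_WHO_PROCESS 1

    int main(void)
    {
            int ioprio = IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT; /* level 0 */

            return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio);
    }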
+
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
+ bool io_high_prio;
+
+ io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
+
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
- scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
+ scmd->cmnd, scmd->cmd_len, queue_group, NULL,
+ false, io_high_prio);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info, bool raid_bypass)
+ struct pqi_encryption_info *encryption_info, bool raid_bypass,
+ bool io_high_prio)
{
int rc;
struct pqi_io_request *io_request;
struct pqi_aio_path_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = raid_bypass;
request = io_request->iu;
- memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
+ memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
put_unaligned_le32(aio_handle, &request->nexus_id);
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb))
cdb_length = sizeof(request->cdb);
request->cdb_length = cdb_length;
@@ -5779,7 +5874,7 @@ static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
return false;
- return scmd->SCp.this_residual == 0;
+ return pqi_cmd_priv(scmd)->this_residual == 0;
}
/*
@@ -5802,7 +5897,7 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
return;
}
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
}
static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
@@ -5897,7 +5992,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
return 0;
}
- atomic_inc(&device->scsi_cmds_outstanding);
+ atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
ctrl_info = shost_to_hba(shost);
@@ -5943,7 +6038,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
out:
if (rc)
- atomic_dec(&device->scsi_cmds_outstanding);
+ atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
return rc;
}
@@ -6008,7 +6103,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
displayed_warning = false;
start_jiffies = jiffies;
- warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
while (1) {
queued_io_count = pqi_queued_io_count(ctrl_info);
@@ -6023,7 +6118,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
"waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
displayed_warning = true;
- warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
}
@@ -6083,7 +6178,7 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, unsigned long timeout_msecs)
+ struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
{
int cmds_outstanding;
unsigned long start_jiffies;
@@ -6091,26 +6186,28 @@ static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
unsigned long msecs_waiting;
start_jiffies = jiffies;
- warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
- while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return -ENXIO;
+ while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
+ if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ }
msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
if (msecs_waiting >= timeout_msecs) {
dev_err(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
- device->lun, msecs_waiting / 1000, cmds_outstanding);
+ lun, msecs_waiting / 1000, cmds_outstanding);
return -ETIMEDOUT;
}
if (time_after(jiffies, warning_timeout)) {
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus, device->target,
- device->lun, msecs_waiting / 1000, cmds_outstanding);
- warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ lun, msecs_waiting / 1000, cmds_outstanding);
+ warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
}
usleep_range(1000, 2000);
}
@@ -6129,7 +6226,7 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct completion *wait)
+ struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
{
int rc;
unsigned int wait_secs;
@@ -6139,7 +6236,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
while (1) {
if (wait_for_completion_io_timeout(wait,
- PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
+ PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
rc = 0;
break;
}
@@ -6151,10 +6248,10 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
}
wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
- cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
+ cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
- ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
+ ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
}
return rc;
@@ -6162,13 +6259,15 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
-static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
struct pqi_io_request *io_request;
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -6182,6 +6281,8 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
put_unaligned_le16(io_request->index, &request->request_id);
memcpy(request->lun_number, device->scsi3addr,
sizeof(request->lun_number));
+ if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
+ request->ml_device_lun_number = (u8)scmd->device->lun;
request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
if (ctrl_info->tmf_iu_timeout_supported)
put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
@@ -6189,7 +6290,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
io_request);
- rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+ rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
if (rc == 0)
rc = io_request->status;
@@ -6203,16 +6304,18 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *d
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
-static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
+static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int reset_rc;
int wait_rc;
unsigned int retries;
unsigned long timeout_msecs;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
for (retries = 0;;) {
- reset_rc = pqi_lun_reset(ctrl_info, device);
- if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
+ reset_rc = pqi_lun_reset(ctrl_info, scmd);
+ if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
break;
msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
}
@@ -6220,18 +6323,19 @@ static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pq
timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
- wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
+ wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
if (wait_rc && reset_rc == 0)
reset_rc = wait_rc;
return reset_rc == 0 ? SUCCESS : FAILED;
}
-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
int rc;
+ struct pqi_scsi_dev *device;
+ device = scmd->device->hostdata;
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
pqi_fail_io_queued_for_device(ctrl_info, device);
@@ -6239,7 +6343,7 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
if (rc)
rc = FAILED;
else
- rc = pqi_lun_reset_with_retries(ctrl_info, device);
+ rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
pqi_ctrl_unblock_requests(ctrl_info);
return rc;
@@ -6261,18 +6365,18 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
shost->host_no,
- device->bus, device->target, device->lun,
+ device->bus, device->target, (u32)scmd->device->lun,
scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
rc = FAILED;
else
- rc = pqi_device_reset(ctrl_info, device);
+ rc = pqi_device_reset(ctrl_info, scmd);
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
- shost->host_no, device->bus, device->target, device->lun,
+ shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
mutex_unlock(&ctrl_info->lun_reset_mutex);
@@ -6332,12 +6436,12 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int pqi_map_queues(struct Scsi_Host *shost)
+static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ ctrl_info->pci_dev, 0);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
@@ -6361,6 +6465,35 @@ static int pqi_slave_configure(struct scsi_device *sdev)
return rc;
}
+static void pqi_slave_destroy(struct scsi_device *sdev)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_scsi_dev *device;
+ int mutex_acquired;
+ unsigned long flags;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
+ mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
+ if (!mutex_acquired)
+ return;
+
+ device = sdev->hostdata;
+ if (!device) {
+ mutex_unlock(&ctrl_info->scan_mutex);
+ return;
+ }
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_del(&device->scsi_device_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ mutex_unlock(&ctrl_info->scan_mutex);
+
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+}
+
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
struct pci_dev *pci_dev;
@@ -6875,6 +7008,9 @@ static ssize_t pqi_unique_id_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -6911,6 +7047,9 @@ static ssize_t pqi_lunid_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -6946,6 +7085,9 @@ static ssize_t pqi_path_info_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7023,10 +7165,13 @@ static ssize_t pqi_sas_address_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- if (!device || !pqi_is_device_with_sas_address(device)) {
+ if (!device) {
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -7049,6 +7194,9 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7078,6 +7226,9 @@ static ssize_t pqi_raid_level_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7108,6 +7259,9 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
@@ -7123,6 +7277,74 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
}
+static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ int output_len = 0;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENODEV;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ output_len = snprintf(buf, PAGE_SIZE, "%d\n",
+ device->ncq_prio_enable);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return output_len;
+}
+
+static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ u8 ncq_prio_enable = 0;
+
+ if (kstrtou8(buf, 0, &ncq_prio_enable))
+ return -EINVAL;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ if (!device->ncq_prio_support ||
+ !device->is_physical_device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -EINVAL;
+ }
+
+ device->ncq_prio_enable = ncq_prio_enable;
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return strlen(buf);
+}
+
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7130,6 +7352,8 @@ static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
+static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
+ pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr,
@@ -7139,6 +7363,7 @@ static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_ssd_smart_path_enabled.attr,
&dev_attr_raid_level.attr,
&dev_attr_raid_bypass_cnt.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL
};
@@ -7156,9 +7381,11 @@ static struct scsi_host_template pqi_driver_template = {
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
.slave_configure = pqi_slave_configure,
+ .slave_destroy = pqi_slave_destroy,
.map_queues = pqi_map_queues,
.sdev_groups = pqi_sdev_groups,
.shost_groups = pqi_shost_groups,
+ .cmd_size = sizeof(struct pqi_cmd_priv),
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
@@ -7177,7 +7404,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
shost->this_id = -1;
shost->max_channel = PQI_MAX_BUS;
shost->max_cmd_len = MAX_COMMAND_SIZE;
- shost->max_lun = ~0;
+ shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
shost->max_id = ~0;
shost->max_sectors = ctrl_info->max_sectors;
@@ -7245,8 +7473,7 @@ static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
reset_reg.all_bits = readl(&pqi_registers->device_reset);
if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
break;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info)) {
+ if (!sis_is_firmware_running(ctrl_info)) {
rc = -ENXIO;
break;
}
@@ -7350,6 +7577,9 @@ static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
sizeof(identify->vendor_id));
ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
+ dev_info(&ctrl_info->pci_dev->dev,
+ "Firmware version: %s\n", ctrl_info->firmware_version);
+
out:
kfree(identify);
@@ -7514,10 +7744,6 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
break;
- case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
- ctrl_info->unique_wwid_in_report_phys_lun_supported =
- firmware_feature->enabled;
- break;
case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
ctrl_info->firmware_triage_supported = firmware_feature->enabled;
pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
@@ -7525,6 +7751,9 @@ static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
break;
+ case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
+ ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
+ break;
}
pqi_firmware_feature_status(ctrl_info, firmware_feature);
@@ -7616,11 +7845,6 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_status = pqi_firmware_feature_status,
},
{
- .feature_name = "Unique WWID in Report Physical LUN",
- .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
- .feature_status = pqi_ctrl_update_feature_flags,
- },
- {
.feature_name = "Firmware Triage",
.feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
.feature_status = pqi_ctrl_update_feature_flags,
@@ -7630,6 +7854,11 @@ static struct pqi_firmware_feature pqi_firmware_features[] = {
.feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
.feature_status = pqi_ctrl_update_feature_flags,
},
+ {
+ .feature_name = "Multi-LUN Target",
+ .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
+ .feature_status = pqi_ctrl_update_feature_flags,
+ },
};
static void pqi_process_firmware_features(
@@ -7729,9 +7958,9 @@ static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
ctrl_info->enable_r6_writes = false;
ctrl_info->raid_iu_timeout_supported = false;
ctrl_info->tmf_iu_timeout_supported = false;
- ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
ctrl_info->firmware_triage_supported = false;
ctrl_info->rpl_extended_format_4_5_supported = false;
+ ctrl_info->multi_lun_device_supported = false;
}
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
@@ -7857,6 +8086,21 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
return pqi_revert_to_sis_mode(ctrl_info);
}
+static void pqi_perform_lockup_action(void)
+{
+ switch (pqi_lockup_action) {
+ case PANIC:
+ panic("FATAL: Smart Family Controller lockup detected");
+ break;
+ case REBOOT:
+ emergency_restart();
+ break;
+ case NONE:
+ default:
+ break;
+ }
+}
+
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -7869,7 +8113,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
}
sis_soft_reset(ctrl_info);
- msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
+ ssleep(PQI_POST_RESET_DELAY_SECS);
} else {
rc = pqi_force_sis_mode(ctrl_info);
if (rc)
@@ -7881,8 +8125,15 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
* commands.
*/
rc = sis_wait_for_ctrl_ready(ctrl_info);
- if (rc)
+ if (rc) {
+ if (reset_devices) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "kdump init failed with error %d\n", rc);
+ pqi_lockup_action = REBOOT;
+ pqi_perform_lockup_action();
+ }
return rc;
+ }
/*
* Get the controller properties. This allows us to determine
@@ -8366,6 +8617,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
ctrl_info->max_write_raid_1_10_2drive = ~0;
ctrl_info->max_write_raid_1_10_3drive = ~0;
+ ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
return ctrl_info;
}
@@ -8383,7 +8635,6 @@ static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
- pqi_stop_heartbeat_timer(ctrl_info);
pqi_free_interrupts(ctrl_info);
if (ctrl_info->queue_memory_base)
dma_free_coherent(&ctrl_info->pci_dev->dev,
@@ -8408,9 +8659,15 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
+ ctrl_info->controller_online = false;
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
- pqi_remove_all_scsi_devices(ctrl_info);
+ if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
+ pqi_fail_all_outstanding_requests(ctrl_info);
+ ctrl_info->pqi_mode_enabled = false;
+ }
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8607,21 +8864,6 @@ static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int de
return pqi_ctrl_init_resume(ctrl_info);
}
-static void pqi_perform_lockup_action(void)
-{
- switch (pqi_lockup_action) {
- case PANIC:
- panic("FATAL: Smart Family Controller lockup detected");
- break;
- case REBOOT:
- emergency_restart();
- break;
- case NONE:
- default:
- break;
- }
-}
-
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
.status = SAM_STAT_CHECK_CONDITION,
@@ -8712,7 +8954,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
int rc;
- int node, cp_node;
+ int node;
struct pqi_ctrl_info *ctrl_info;
pqi_print_ctrl_info(pci_dev, id);
@@ -8731,10 +8973,10 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
node = dev_to_node(&pci_dev->dev);
if (node == NUMA_NO_NODE) {
- cp_node = cpu_to_node(0);
- if (cp_node == NUMA_NO_NODE)
- cp_node = 0;
- set_dev_node(&pci_dev->dev, cp_node);
+ node = cpu_to_node(0);
+ if (node == NUMA_NO_NODE)
+ node = 0;
+ set_dev_node(&pci_dev->dev, node);
}
ctrl_info = pqi_alloc_ctrl_info(node);
@@ -8765,11 +9007,18 @@ error:
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
+ u16 vendor_id;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
return;
+ pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
+ if (vendor_id == 0xffff)
+ ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
+ else
+ ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
+
pqi_remove_ctrl(ctrl_info);
}
@@ -8793,6 +9042,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
+ enum bmic_flush_cache_shutdown_event shutdown_event;
ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info) {
@@ -8808,11 +9058,16 @@ static void pqi_shutdown(struct pci_dev *pci_dev)
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
+ if (system_state == SYSTEM_RESTART)
+ shutdown_event = RESTART;
+ else
+ shutdown_event = SHUTDOWN;
+
/*
* Write all data in the controller's battery-backed cache to
* storage.
*/
- rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
+ rc = pqi_flush_cache(ctrl_info, shutdown_event);
if (rc)
dev_err(&pci_dev->dev,
"unable to flush controller cache\n");
@@ -8840,15 +9095,49 @@ static void pqi_process_lockup_action_param(void)
DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
+#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
+
+static void pqi_process_ctrl_ready_timeout_param(void)
+{
+ if (pqi_ctrl_ready_timeout_secs == 0)
+ return;
+
+ if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
+ } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
+ pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
+ DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
+ pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
+ }
+
+ sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
+}
+
static void pqi_process_module_params(void)
{
pqi_process_lockup_action_param();
+ pqi_process_ctrl_ready_timeout_param();
}
-static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
+#if defined(CONFIG_PM)
+
+static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
{
+ if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
+ return RESTART;
+
+ return SUSPEND;
+}
+
+static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
+{
+ struct pci_dev *pci_dev;
struct pqi_ctrl_info *ctrl_info;
+ pci_dev = to_pci_dev(dev);
ctrl_info = pci_get_drvdata(pci_dev);
pqi_wait_until_ofa_finished(ctrl_info);
@@ -8858,16 +9147,17 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
pqi_ctrl_block_device_reset(ctrl_info);
pqi_ctrl_block_requests(ctrl_info);
pqi_ctrl_wait_until_quiesced(ctrl_info);
- pqi_flush_cache(ctrl_info, SUSPEND);
- pqi_stop_heartbeat_timer(ctrl_info);
- pqi_crash_if_pending_command(ctrl_info);
+ if (suspend) {
+ enum bmic_flush_cache_shutdown_event shutdown_event;
- if (state.event == PM_EVENT_FREEZE)
- return 0;
+ shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
+ pqi_flush_cache(ctrl_info, shutdown_event);
+ }
- pci_save_state(pci_dev);
- pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_crash_if_pending_command(ctrl_info);
+ pqi_free_irqs(ctrl_info);
ctrl_info->controller_online = false;
ctrl_info->pqi_mode_enabled = false;
@@ -8875,44 +9165,89 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat
return 0;
}
-static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
+static __maybe_unused int pqi_suspend(struct device *dev)
+{
+ return pqi_suspend_or_freeze(dev, true);
+}
+
+static int pqi_resume_or_restore(struct device *dev)
{
int rc;
+ struct pci_dev *pci_dev;
struct pqi_ctrl_info *ctrl_info;
+ pci_dev = to_pci_dev(dev);
ctrl_info = pci_get_drvdata(pci_dev);
- if (pci_dev->current_state != PCI_D0) {
- ctrl_info->max_hw_queue_index = 0;
- pqi_free_interrupts(ctrl_info);
- pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
- rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
- IRQF_SHARED, DRIVER_NAME_SHORT,
- &ctrl_info->queue_groups[0]);
- if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "irq %u init failed with error %d\n",
- pci_dev->irq, rc);
- return rc;
- }
- pqi_ctrl_unblock_device_reset(ctrl_info);
- pqi_ctrl_unblock_requests(ctrl_info);
- pqi_scsi_unblock_requests(ctrl_info);
- pqi_ctrl_unblock_scan(ctrl_info);
- return 0;
- }
-
- pci_set_power_state(pci_dev, PCI_D0);
- pci_restore_state(pci_dev);
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
pqi_ctrl_unblock_device_reset(ctrl_info);
pqi_ctrl_unblock_requests(ctrl_info);
pqi_scsi_unblock_requests(ctrl_info);
pqi_ctrl_unblock_scan(ctrl_info);
+ ssleep(PQI_POST_RESET_DELAY_SECS);
+
return pqi_ctrl_init_resume(ctrl_info);
}
+static int pqi_freeze(struct device *dev)
+{
+ return pqi_suspend_or_freeze(dev, false);
+}
+
+static int pqi_thaw(struct device *dev)
+{
+ int rc;
+ struct pci_dev *pci_dev;
+ struct pqi_ctrl_info *ctrl_info;
+
+ pci_dev = to_pci_dev(dev);
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ rc = pqi_request_irqs(ctrl_info);
+ if (rc)
+ return rc;
+
+ ctrl_info->controller_online = true;
+ ctrl_info->pqi_mode_enabled = true;
+
+ pqi_ctrl_unblock_device_reset(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ pqi_scsi_unblock_requests(ctrl_info);
+ pqi_ctrl_unblock_scan(ctrl_info);
+
+ return 0;
+}
+
+static int pqi_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev;
+ struct pqi_ctrl_info *ctrl_info;
+ enum bmic_flush_cache_shutdown_event shutdown_event;
+
+ pci_dev = to_pci_dev(dev);
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
+ pqi_flush_cache(ctrl_info, shutdown_event);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pqi_pm_ops = {
+ .suspend = pqi_suspend,
+ .resume = pqi_resume_or_restore,
+ .freeze = pqi_freeze,
+ .thaw = pqi_thaw,
+ .poweroff = pqi_poweroff,
+ .restore = pqi_resume_or_restore,
+};
+
+#endif /* CONFIG_PM */
+
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
{
@@ -8945,10 +9280,6 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- 0x193d, 0x8460)
- },
- {
- PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x193d, 0x1104)
},
{
@@ -9045,6 +9376,34 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006c)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x006f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0070)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0071)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1bd4, 0x0072)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x19e5, 0xd227)
},
{
@@ -9077,6 +9436,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x0659)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0800)
},
{
@@ -9201,6 +9564,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1304)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1380)
},
{
@@ -9261,6 +9628,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1463)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1470)
},
{
@@ -9273,6 +9644,14 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1473)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1474)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1480)
},
{
@@ -9297,6 +9676,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
},
{
@@ -9313,6 +9704,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
},
{
@@ -9417,6 +9812,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x036f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0381)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0382)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1590, 0x0383)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1d8d, 0x0800)
},
{
@@ -9441,6 +9852,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f0c, 0x3161)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x5445)
},
{
@@ -9453,6 +9868,30 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x5449)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544b)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544d)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544e)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x544f)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0b27)
},
{
@@ -9465,6 +9904,46 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0101)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cc4, 0x0201)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0220)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0221)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0520)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0522)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0620)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0621)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0622)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_LENOVO, 0x0623)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
@@ -9479,8 +9958,9 @@ static struct pci_driver pqi_pci_driver = {
.remove = pqi_pci_remove,
.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
- .suspend = pqi_suspend,
- .resume = pqi_resume,
+ .driver = {
+ .pm = &pqi_pm_ops
+ },
#endif
};
@@ -9489,6 +9969,8 @@ static int __init pqi_init(void)
int rc;
pr_info(DRIVER_NAME "\n");
+ pqi_verify_structures();
+ sis_verify_structures();
pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
if (!pqi_sas_transport_template)
@@ -9512,7 +9994,7 @@ static void __exit pqi_cleanup(void)
module_init(pqi_init);
module_exit(pqi_cleanup);
-static void __attribute__((unused)) verify_structures(void)
+static void pqi_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
sis_host_to_ctrl_doorbell) != 0x20);
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index dea4ebaf1677..13e8c539010e 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index d66eb8ea161c..5811fb3c22a9 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -86,13 +86,15 @@ struct sis_base_struct {
#pragma pack()
+unsigned int sis_ctrl_ready_timeout_secs = SIS_CTRL_READY_TIMEOUT_SECS;
+
static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
unsigned int timeout_secs)
{
unsigned long timeout;
u32 status;
- timeout = (timeout_secs * PQI_HZ) + jiffies;
+ timeout = (timeout_secs * HZ) + jiffies;
while (1) {
status = readl(&ctrl_info->registers->sis_firmware_status);
@@ -122,7 +124,7 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
{
return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
- SIS_CTRL_READY_TIMEOUT_SECS);
+ sis_ctrl_ready_timeout_secs);
}
int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
@@ -138,7 +140,7 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
status = readl(&ctrl_info->registers->sis_firmware_status);
- if (status & SIS_CTRL_KERNEL_PANIC)
+ if (status != ~0 && (status & SIS_CTRL_KERNEL_PANIC))
running = false;
else
running = true;
@@ -194,6 +196,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
/* Disable doorbell interrupts by masking all interrupts. */
writel(~0, &registers->sis_interrupt_mask);
+ usleep_range(1000, 2000);
/*
* Force the completion of the interrupt mask register write before
@@ -209,7 +212,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
* the top of the loop in order to give the controller time to start
* processing the command before we start polling.
*/
- timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
@@ -355,7 +358,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
u32 doorbell_register;
unsigned long timeout;
- timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
doorbell_register =
@@ -383,6 +386,7 @@ static int sis_wait_for_doorbell_bit_to_clear(
static inline int sis_set_doorbell_bit(struct pqi_ctrl_info *ctrl_info, u32 bit)
{
writel(bit, &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ usleep_range(1000, 2000);
return sis_wait_for_doorbell_bit_to_clear(ctrl_info, bit);
}
@@ -423,6 +427,7 @@ int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
{
writel(value, &ctrl_info->registers->sis_driver_scratch);
+ usleep_range(1000, 2000);
}
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
@@ -452,7 +457,7 @@ int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info)
enum sis_fw_triage_status status;
unsigned long timeout;
- timeout = (SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+ timeout = (SIS_FW_TRIAGE_STATUS_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
status = sis_read_firmware_triage_status(ctrl_info);
if (status == FW_TRIAGE_COND_INVALID) {
@@ -479,7 +484,7 @@ int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info)
return rc;
}
-static void __attribute__((unused)) verify_structures(void)
+void sis_verify_structures(void)
{
BUILD_BUG_ON(offsetof(struct sis_base_struct,
revision) != 0x0);
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index bd92ff49f385..9dcbae96a5c6 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -12,6 +12,7 @@
#if !defined(_SMARTPQI_SIS_H)
#define _SMARTPQI_SIS_H
+void sis_verify_structures(void);
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info);
bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
@@ -31,4 +32,6 @@ void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
u32 sis_get_product_id(struct pqi_ctrl_info *ctrl_info);
int sis_wait_for_fw_triage_completion(struct pqi_ctrl_info *ctrl_info);
+extern unsigned int sis_ctrl_ready_timeout_secs;
+
#endif /* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/snic/cq_desc.h b/drivers/scsi/snic/cq_desc.h
index a5290562c1fa..52a916fd0824 100644
--- a/drivers/scsi/snic/cq_desc.h
+++ b/drivers/scsi/snic/cq_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _CQ_DESC_H_
#define _CQ_DESC_H_
diff --git a/drivers/scsi/snic/cq_enet_desc.h b/drivers/scsi/snic/cq_enet_desc.h
index 0a1be2ed0288..bd7381e52521 100644
--- a/drivers/scsi/snic/cq_enet_desc.h
+++ b/drivers/scsi/snic/cq_enet_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _CQ_ENET_DESC_H_
#define _CQ_ENET_DESC_H_
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index 4ec7e30678e1..32f5a34b6987 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _SNIC_H_
#define _SNIC_H_
diff --git a/drivers/scsi/snic/snic_attrs.c b/drivers/scsi/snic/snic_attrs.c
index dc03ce1ec909..3ddbdbc3ded1 100644
--- a/drivers/scsi/snic/snic_attrs.c
+++ b/drivers/scsi/snic/snic_attrs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/string.h>
#include <linux/device.h>
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index 703f229862fc..5f4fca96b192 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 5e0faeba516e..57bdc3ba49d9 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index e9ccfb97773f..9b2b5f8c23b9 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/mempool.h>
@@ -100,7 +86,7 @@ snic_queue_report_tgt_req(struct snic *snic)
SNIC_BUG_ON(ntgts == 0);
buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
- buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
+ buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) {
snic_req_free(snic, rqi);
SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
diff --git a/drivers/scsi/snic/snic_disc.h b/drivers/scsi/snic/snic_disc.h
index 97fa3f5c5bb4..9ad7f84a3484 100644
--- a/drivers/scsi/snic/snic_disc.h
+++ b/drivers/scsi/snic/snic_disc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_DISC_H
#define __SNIC_DISC_H
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index 2a045a57e365..2550ba964b03 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_FWINT_H
#define __SNIC_FWINT_H
@@ -159,7 +145,7 @@ struct snic_exch_ver_req {
* HBA Capabilities
* Bit 1: Reserved.
* Bit 2: Dynamic Discovery of LUNs.
- * Bit 3: Async event notifications on on tgt online/offline events.
+ * Bit 3: Async event notifications on tgt online/offline events.
* Bit 4: IO timeout support in FW.
* Bit 5-31: Reserved.
*/
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 159ee94d2a55..32a77bee41d5 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/pci.h>
diff --git a/drivers/scsi/snic/snic_io.h b/drivers/scsi/snic/snic_io.h
index 093d6524cd42..de6694a24c5f 100644
--- a/drivers/scsi/snic/snic_io.h
+++ b/drivers/scsi/snic/snic_io.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _SNIC_IO_H
#define _SNIC_IO_H
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index c4da3673f2ae..471a37422da9 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/string.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 29d56396058c..174f7811fe50 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
diff --git a/drivers/scsi/snic/snic_res.c b/drivers/scsi/snic/snic_res.c
index b54912c8ca0c..43f1a2823514 100644
--- a/drivers/scsi/snic/snic_res.c
+++ b/drivers/scsi/snic/snic_res.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/snic/snic_res.h b/drivers/scsi/snic/snic_res.h
index 273f72f2a023..53cf6b19ab28 100644
--- a/drivers/scsi/snic/snic_res.h
+++ b/drivers/scsi/snic/snic_res.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_RES_H
#define __SNIC_RES_H
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 5f17666f3e1d..961af6fc21bc 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/mempool.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
index faf0cb601954..f0285c5a35f8 100644
--- a/drivers/scsi/snic/snic_stats.h
+++ b/drivers/scsi/snic/snic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_STATS_H
#define __SNIC_STATS_H
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c
index f23fe2f88438..c2e5ab7e976c 100644
--- a/drivers/scsi/snic/snic_trc.c
+++ b/drivers/scsi/snic/snic_trc.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h
index ce305b4b8fa2..c38e0dadc958 100644
--- a/drivers/scsi/snic/snic_trc.h
+++ b/drivers/scsi/snic/snic_trc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef __SNIC_TRC_H
#define __SNIC_TRC_H
diff --git a/drivers/scsi/snic/vnic_cq.c b/drivers/scsi/snic/vnic_cq.c
index 3455dd7e73f4..0d5d3bd4be1c 100644
--- a/drivers/scsi/snic/vnic_cq.c
+++ b/drivers/scsi/snic/vnic_cq.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/snic/vnic_cq.h b/drivers/scsi/snic/vnic_cq.h
index 6e651c3e16f7..6cee911eec5f 100644
--- a/drivers/scsi/snic/vnic_cq.h
+++ b/drivers/scsi/snic/vnic_cq.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_CQ_H_
#define _VNIC_CQ_H_
diff --git a/drivers/scsi/snic/vnic_cq_fw.h b/drivers/scsi/snic/vnic_cq_fw.h
index c2d1bbd44bd1..d74954bc70e3 100644
--- a/drivers/scsi/snic/vnic_cq_fw.h
+++ b/drivers/scsi/snic/vnic_cq_fw.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_CQ_FW_H_
#define _VNIC_CQ_FW_H_
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index 05e374f80946..760f3f22095c 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/vnic_dev.h b/drivers/scsi/snic/vnic_dev.h
index e65726da6504..d2f9b6f7b313 100644
--- a/drivers/scsi/snic/vnic_dev.h
+++ b/drivers/scsi/snic/vnic_dev.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
diff --git a/drivers/scsi/snic/vnic_devcmd.h b/drivers/scsi/snic/vnic_devcmd.h
index 0e0fa38f8d90..9d82fcb7414b 100644
--- a/drivers/scsi/snic/vnic_devcmd.h
+++ b/drivers/scsi/snic/vnic_devcmd.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_DEVCMD_H_
#define _VNIC_DEVCMD_H_
diff --git a/drivers/scsi/snic/vnic_intr.c b/drivers/scsi/snic/vnic_intr.c
index a7d54806787d..23627f9591f2 100644
--- a/drivers/scsi/snic/vnic_intr.c
+++ b/drivers/scsi/snic/vnic_intr.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/snic/vnic_intr.h b/drivers/scsi/snic/vnic_intr.h
index 4547f603fe5e..7bff60fafb07 100644
--- a/drivers/scsi/snic/vnic_intr.h
+++ b/drivers/scsi/snic/vnic_intr.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_INTR_H_
#define _VNIC_INTR_H_
diff --git a/drivers/scsi/snic/vnic_resource.h b/drivers/scsi/snic/vnic_resource.h
index 9713d6835db3..372596b0915f 100644
--- a/drivers/scsi/snic/vnic_resource.h
+++ b/drivers/scsi/snic/vnic_resource.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_RESOURCE_H_
#define _VNIC_RESOURCE_H_
diff --git a/drivers/scsi/snic/vnic_snic.h b/drivers/scsi/snic/vnic_snic.h
index 514d39f5cf00..ffc8a0fee577 100644
--- a/drivers/scsi/snic/vnic_snic.h
+++ b/drivers/scsi/snic/vnic_snic.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_SNIC_H_
#define _VNIC_SNIC_H_
diff --git a/drivers/scsi/snic/vnic_stats.h b/drivers/scsi/snic/vnic_stats.h
index 370a37c97748..38155aae7a52 100644
--- a/drivers/scsi/snic/vnic_stats.h
+++ b/drivers/scsi/snic/vnic_stats.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_STATS_H_
#define _VNIC_STATS_H_
diff --git a/drivers/scsi/snic/vnic_wq.c b/drivers/scsi/snic/vnic_wq.c
index 1e91d432089e..48be9a3f4c3d 100644
--- a/drivers/scsi/snic/vnic_wq.c
+++ b/drivers/scsi/snic/vnic_wq.c
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2014 Cisco Systems, Inc. All rights reserved.
#include <linux/errno.h>
#include <linux/types.h>
diff --git a/drivers/scsi/snic/vnic_wq.h b/drivers/scsi/snic/vnic_wq.h
index 7cc031c7ceba..1415da4b68dc 100644
--- a/drivers/scsi/snic/vnic_wq.h
+++ b/drivers/scsi/snic/vnic_wq.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_WQ_H_
#define _VNIC_WQ_H_
diff --git a/drivers/scsi/snic/wq_enet_desc.h b/drivers/scsi/snic/wq_enet_desc.h
index 68f62b6d105b..e8025331b503 100644
--- a/drivers/scsi/snic/wq_enet_desc.h
+++ b/drivers/scsi/snic/wq_enet_desc.h
@@ -1,19 +1,5 @@
-/*
- * Copyright 2014 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2014 Cisco Systems, Inc. All rights reserved. */
#ifndef _WQ_ENET_DESC_H_
#define _WQ_ENET_DESC_H_
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8e4af111c078..a278b739d0c5 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -109,16 +109,11 @@ static DEFINE_SPINLOCK(sr_index_lock);
static struct lock_class_key sr_bio_compl_lkclass;
-/* This semaphore is used to mediate the 0->1 reference get in the
- * face of object destruction (i.e. we can't allow a get on an
- * object after last put) */
-static DEFINE_MUTEX(sr_ref_mutex);
-
static int sr_open(struct cdrom_device_info *, int);
static void sr_release(struct cdrom_device_info *);
static void get_sectorsize(struct scsi_cd *);
-static void get_capabilities(struct scsi_cd *);
+static int get_capabilities(struct scsi_cd *);
static unsigned int sr_check_events(struct cdrom_device_info *cdi,
unsigned int clearing, int slot);
@@ -143,11 +138,9 @@ static const struct cdrom_device_ops sr_dops = {
.capability = SR_CAPABILITIES,
};
-static void sr_kref_release(struct kref *kref);
-
static inline struct scsi_cd *scsi_cd(struct gendisk *disk)
{
- return container_of(disk->private_data, struct scsi_cd, driver);
+ return disk->private_data;
}
static int sr_runtime_suspend(struct device *dev)
@@ -163,38 +156,6 @@ static int sr_runtime_suspend(struct device *dev)
return 0;
}
-/*
- * The get and put routines for the struct scsi_cd. Note this entity
- * has a scsi_device pointer and owns a reference to this.
- */
-static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)
-{
- struct scsi_cd *cd = NULL;
-
- mutex_lock(&sr_ref_mutex);
- if (disk->private_data == NULL)
- goto out;
- cd = scsi_cd(disk);
- kref_get(&cd->kref);
- if (scsi_device_get(cd->device)) {
- kref_put(&cd->kref, sr_kref_release);
- cd = NULL;
- }
- out:
- mutex_unlock(&sr_ref_mutex);
- return cd;
-}
-
-static void scsi_cd_put(struct scsi_cd *cd)
-{
- struct scsi_device *sdev = cd->device;
-
- mutex_lock(&sr_ref_mutex);
- kref_put(&cd->kref, sr_kref_release);
- scsi_device_put(sdev);
- mutex_unlock(&sr_ref_mutex);
-}
-
static unsigned int sr_get_events(struct scsi_device *sdev)
{
u8 buf[8];
@@ -335,7 +296,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
int block_sectors = 0;
long error_sector;
struct request *rq = scsi_cmd_to_rq(SCpnt);
- struct scsi_cd *cd = scsi_cd(rq->rq_disk);
+ struct scsi_cd *cd = scsi_cd(rq->q->disk);
#ifdef DEBUG
scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
@@ -402,7 +363,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
ret = scsi_alloc_sgtables(SCpnt);
if (ret != BLK_STS_OK)
return ret;
- cd = scsi_cd(rq->rq_disk);
+ cd = scsi_cd(rq->q->disk);
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
"Doing sr request, block = %d\n", block));
@@ -522,15 +483,13 @@ static void sr_revalidate_disk(struct scsi_cd *cd)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
- struct scsi_cd *cd;
- struct scsi_device *sdev;
- int ret = -ENXIO;
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
+ struct scsi_device *sdev = cd->device;
+ int ret;
- cd = scsi_cd_get(bdev->bd_disk);
- if (!cd)
- goto out;
+ if (scsi_device_get(cd->device))
+ return -ENXIO;
- sdev = cd->device;
scsi_autopm_get_device(sdev);
if (bdev_check_media_change(bdev))
sr_revalidate_disk(cd);
@@ -541,9 +500,7 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
scsi_autopm_put_device(sdev);
if (ret)
- scsi_cd_put(cd);
-
-out:
+ scsi_device_put(cd->device);
return ret;
}
@@ -555,14 +512,13 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode)
cdrom_release(&cd->cdi, mode);
mutex_unlock(&cd->lock);
- scsi_cd_put(cd);
+ scsi_device_put(cd->device);
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct gendisk *disk = bdev->bd_disk;
- struct scsi_cd *cd = scsi_cd(disk);
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
struct scsi_device *sdev = cd->device;
void __user *argp = (void __user *)arg;
int ret;
@@ -579,12 +535,12 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
scsi_autopm_get_device(sdev);
- if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) {
+ if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
goto put;
}
- ret = scsi_ioctl(sdev, disk, mode, cmd, argp);
+ ret = scsi_ioctl(sdev, mode, cmd, argp);
put:
scsi_autopm_put_device(sdev);
@@ -596,18 +552,24 @@ out:
static unsigned int sr_block_check_events(struct gendisk *disk,
unsigned int clearing)
{
- unsigned int ret = 0;
- struct scsi_cd *cd;
+ struct scsi_cd *cd = disk->private_data;
- cd = scsi_cd_get(disk);
- if (!cd)
+ if (atomic_read(&cd->device->disk_events_disable_depth))
return 0;
+ return cdrom_check_events(&cd->cdi, clearing);
+}
- if (!atomic_read(&cd->device->disk_events_disable_depth))
- ret = cdrom_check_events(&cd->cdi, clearing);
+static void sr_free_disk(struct gendisk *disk)
+{
+ struct scsi_cd *cd = disk->private_data;
- scsi_cd_put(cd);
- return ret;
+ spin_lock(&sr_index_lock);
+ clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
+ spin_unlock(&sr_index_lock);
+
+ unregister_cdrom(&cd->cdi);
+ mutex_destroy(&cd->lock);
+ kfree(cd);
}
static const struct block_device_operations sr_bdops =
@@ -618,6 +580,7 @@ static const struct block_device_operations sr_bdops =
.ioctl = sr_block_ioctl,
.compat_ioctl = blkdev_compat_ptr_ioctl,
.check_events = sr_block_check_events,
+ .free_disk = sr_free_disk,
};
static int sr_open(struct cdrom_device_info *cdi, int purpose)
@@ -661,10 +624,8 @@ static int sr_probe(struct device *dev)
if (!cd)
goto fail;
- kref_init(&cd->kref);
-
- disk = __alloc_disk_node(sdev->request_queue, NUMA_NO_NODE,
- &sr_bio_compl_lkclass);
+ disk = blk_mq_alloc_disk_for_queue(sdev->request_queue,
+ &sr_bio_compl_lkclass);
if (!disk)
goto fail_free;
mutex_init(&cd->lock);
@@ -684,15 +645,15 @@ static int sr_probe(struct device *dev)
disk->minors = 1;
sprintf(disk->disk_name, "sr%d", minor);
disk->fops = &sr_bdops;
- disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
- disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
+ disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT |
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
cd->device = sdev;
cd->disk = disk;
- cd->driver = &sr_template;
cd->capacity = 0x1fffff;
cd->device->changed = 1; /* force recheck CD type */
cd->media_present = 1;
@@ -708,12 +669,13 @@ static int sr_probe(struct device *dev)
sdev->sector_size = 2048; /* A guess, just in case */
- /* FIXME: need to handle a get_capabilities failure properly ?? */
- get_capabilities(cd);
+ error = -ENOMEM;
+ if (get_capabilities(cd))
+ goto fail_minor;
sr_vendor_init(cd);
set_capacity(disk, cd->capacity);
- disk->private_data = &cd->driver;
+ disk->private_data = cd;
if (register_cdrom(disk, &cd->cdi))
goto fail_minor;
@@ -725,14 +687,11 @@ static int sr_probe(struct device *dev)
blk_pm_runtime_init(sdev->request_queue, dev);
dev_set_drvdata(dev, cd);
- disk->flags |= GENHD_FL_REMOVABLE;
sr_revalidate_disk(cd);
error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
- if (error) {
- kref_put(&cd->kref, sr_kref_release);
- goto fail;
- }
+ if (error)
+ goto unregister_cdrom;
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
@@ -740,6 +699,8 @@ static int sr_probe(struct device *dev)
return 0;
+unregister_cdrom:
+ unregister_cdrom(&cd->cdi);
fail_minor:
spin_lock(&sr_index_lock);
clear_bit(minor, sr_index_bits);
@@ -834,7 +795,7 @@ static void get_sectorsize(struct scsi_cd *cd)
return;
}
-static void get_capabilities(struct scsi_cd *cd)
+static int get_capabilities(struct scsi_cd *cd)
{
unsigned char *buffer;
struct scsi_mode_data data;
@@ -856,10 +817,10 @@ static void get_capabilities(struct scsi_cd *cd)
/* allocate transfer buffer */
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer) {
sr_printk(KERN_ERR, cd, "out of memory.\n");
- return;
+ return -ENOMEM;
}
/* eat unit attentions */
@@ -879,7 +840,7 @@ static void get_capabilities(struct scsi_cd *cd)
CDC_MRW | CDC_MRW_W | CDC_RAM);
kfree(buffer);
sr_printk(KERN_INFO, cd, "scsi-1 drive");
- return;
+ return 0;
}
n = data.header_length + data.block_descriptor_length;
@@ -938,6 +899,7 @@ static void get_capabilities(struct scsi_cd *cd)
}
kfree(buffer);
+ return 0;
}
/*
@@ -966,7 +928,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
{
struct gendisk *disk = cdi->disk;
u32 len = nr * CD_FRAMESIZE_RAW;
- struct scsi_request *req;
+ struct scsi_cmnd *scmd;
struct request *rq;
struct bio *bio;
int ret;
@@ -974,31 +936,31 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
- req = scsi_req(rq);
+ scmd = blk_mq_rq_to_pdu(rq);
ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret)
goto out_put_request;
- req->cmd[0] = GPCMD_READ_CD;
- req->cmd[1] = 1 << 2;
- req->cmd[2] = (lba >> 24) & 0xff;
- req->cmd[3] = (lba >> 16) & 0xff;
- req->cmd[4] = (lba >> 8) & 0xff;
- req->cmd[5] = lba & 0xff;
- req->cmd[6] = (nr >> 16) & 0xff;
- req->cmd[7] = (nr >> 8) & 0xff;
- req->cmd[8] = nr & 0xff;
- req->cmd[9] = 0xf8;
- req->cmd_len = 12;
+ scmd->cmnd[0] = GPCMD_READ_CD;
+ scmd->cmnd[1] = 1 << 2;
+ scmd->cmnd[2] = (lba >> 24) & 0xff;
+ scmd->cmnd[3] = (lba >> 16) & 0xff;
+ scmd->cmnd[4] = (lba >> 8) & 0xff;
+ scmd->cmnd[5] = lba & 0xff;
+ scmd->cmnd[6] = (nr >> 16) & 0xff;
+ scmd->cmnd[7] = (nr >> 8) & 0xff;
+ scmd->cmnd[8] = nr & 0xff;
+ scmd->cmnd[9] = 0xf8;
+ scmd->cmd_len = 12;
rq->timeout = 60 * HZ;
bio = rq->bio;
- blk_execute_rq(disk, rq, 0);
- if (scsi_req(rq)->result) {
+ blk_execute_rq(rq, false);
+ if (scmd->result) {
struct scsi_sense_hdr sshdr;
- scsi_normalize_sense(req->sense, req->sense_len,
+ scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
&sshdr);
*last_sense = sshdr.sense_key;
ret = -EIO;
@@ -1011,36 +973,6 @@ out_put_request:
return ret;
}
-
-/**
- * sr_kref_release - Called to free the scsi_cd structure
- * @kref: pointer to embedded kref
- *
- * sr_ref_mutex must be held entering this routine. Because it is
- * called on last put, you should always use the scsi_cd_get()
- * scsi_cd_put() helpers which manipulate the semaphore directly
- * and never do a direct kref_put().
- **/
-static void sr_kref_release(struct kref *kref)
-{
- struct scsi_cd *cd = container_of(kref, struct scsi_cd, kref);
- struct gendisk *disk = cd->disk;
-
- spin_lock(&sr_index_lock);
- clear_bit(MINOR(disk_devt(disk)), sr_index_bits);
- spin_unlock(&sr_index_lock);
-
- unregister_cdrom(&cd->cdi);
-
- disk->private_data = NULL;
-
- put_disk(disk);
-
- mutex_destroy(&cd->lock);
-
- kfree(cd);
-}
-
static int sr_remove(struct device *dev)
{
struct scsi_cd *cd = dev_get_drvdata(dev);
@@ -1048,11 +980,7 @@ static int sr_remove(struct device *dev)
scsi_autopm_get_device(cd->device);
del_gendisk(cd->disk);
- dev_set_drvdata(dev, NULL);
-
- mutex_lock(&sr_ref_mutex);
- kref_put(&cd->kref, sr_kref_release);
- mutex_unlock(&sr_ref_mutex);
+ put_disk(cd->disk);
return 0;
}
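
Taken together, the sr.c hunks above replace the driver's private kref/sr_ref_mutex machinery with the gendisk's own reference count: disk->private_data now points directly at the scsi_cd, open/release only pin and unpin the underlying scsi_device, and final teardown moves into the new ->free_disk callback, which the block layer invokes exactly once after the last gendisk reference is dropped. A minimal sketch of the resulting lifetime, condensed from the hunks above with error paths elided:

	/* open: pin only the SCSI device; the gendisk pins the scsi_cd */
	static int sr_block_open(struct block_device *bdev, fmode_t mode)
	{
		struct scsi_cd *cd = bdev->bd_disk->private_data;

		if (scsi_device_get(cd->device))
			return -ENXIO;
		/* ... cdrom_open() ... */
		return 0;
	}

	/* called by the block layer after the final put_disk() */
	static void sr_free_disk(struct gendisk *disk)
	{
		struct scsi_cd *cd = disk->private_data;

		unregister_cdrom(&cd->cdi);
		kfree(cd);
	}
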
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 339c624e04d8..1175f2e213b5 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -18,8 +18,6 @@
#ifndef _SR_H
#define _SR_H
-#include <linux/genhd.h>
-#include <linux/kref.h>
#include <linux/mutex.h>
#define MAX_RETRIES 3
@@ -33,7 +31,6 @@ struct scsi_device;
typedef struct scsi_cd {
- struct scsi_driver *driver;
unsigned capacity; /* size in blocks */
struct scsi_device *device;
unsigned int vendor; /* vendor code, see sr_vendor.c */
@@ -53,9 +50,6 @@ typedef struct scsi_cd {
struct cdrom_device_info cdi;
struct mutex lock;
- /* We hold gendisk and scsi_device references on probe and use
- * the refs on this kref to decide when to release them */
- struct kref kref;
struct gendisk *disk;
} Scsi_CD;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ddd00efc4882..fbdb5124d7f7 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL);
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tochdr->cdth_trk0 = buffer[2];
tochdr->cdth_trk1 = buffer[3];
+err:
kfree(buffer);
return result;
}
@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL);
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tocentry->cdte_ctrl = buffer[5] & 0xf;
tocentry->cdte_adr = buffer[5] >> 4;
@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ buffer[10]) << 8) + buffer[11];
+err:
kfree(buffer);
return result;
}
@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
{
Scsi_CD *cd = cdi->handle;
struct packet_command cgc;
- char *buffer = kmalloc(32, GFP_KERNEL);
+ char *buffer = kzalloc(32, GFP_KERNEL);
int result;
if (!buffer)
@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
cgc.data_direction = DMA_FROM_DEVICE;
cgc.timeout = IOCTL_TIMEOUT;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
memcpy(mcn->medium_catalog_number, buffer + 9, 13);
mcn->medium_catalog_number[13] = 0;
+err:
kfree(buffer);
return result;
}
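
All three sr_ioctl.c hunks apply one pattern: the transfer buffer is zeroed at allocation and, when the command fails, control jumps straight to the free, so stale or device-unwritten buffer contents are never decoded or copied back to the caller. The pattern in isolation:

	buffer = kzalloc(32, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	result = sr_do_ioctl(cd, &cgc);
	if (result)
		goto err;	/* don't parse a buffer the device never filled */

	/* ... decode buffer into the caller's structure ... */
err:
	kfree(buffer);
	return result;
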
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 1f988a1b9166..a61635326ae0 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -131,7 +131,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
if (cd->vendor == VENDOR_TOSHIBA)
density = (blocklength > 2048) ? 0x81 : 0x83;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -179,7 +179,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
if (cd->cdi.mask & CDC_MULTI_SESSION)
return 0;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c2d5608f6b1a..b90a440e135d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -472,15 +472,16 @@ static void st_release_request(struct st_request *streq)
static void st_do_stats(struct scsi_tape *STp, struct request *req)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
ktime_t now;
now = ktime_get();
- if (scsi_req(req)->cmd[0] == WRITE_6) {
+ if (scmd->cmnd[0] == WRITE_6) {
now = ktime_sub(now, STp->stats->write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_write_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
atomic64_inc(&STp->stats->write_cnt);
- if (scsi_req(req)->result) {
+ if (scmd->result) {
atomic64_add(atomic_read(&STp->stats->last_write_size)
- STp->buffer->cmdstat.residual,
&STp->stats->write_byte_cnt);
@@ -489,12 +490,12 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
} else
atomic64_add(atomic_read(&STp->stats->last_write_size),
&STp->stats->write_byte_cnt);
- } else if (scsi_req(req)->cmd[0] == READ_6) {
+ } else if (scmd->cmnd[0] == READ_6) {
now = ktime_sub(now, STp->stats->read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_read_time);
atomic64_add(ktime_to_ns(now), &STp->stats->tot_io_time);
atomic64_inc(&STp->stats->read_cnt);
- if (scsi_req(req)->result) {
+ if (scmd->result) {
atomic64_add(atomic_read(&STp->stats->last_read_size)
- STp->buffer->cmdstat.residual,
&STp->stats->read_byte_cnt);
@@ -511,26 +512,28 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
atomic64_dec(&STp->stats->in_flight);
}
-static void st_scsi_execute_end(struct request *req, blk_status_t status)
+static enum rq_end_io_ret st_scsi_execute_end(struct request *req,
+ blk_status_t status)
{
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
struct st_request *SRpnt = req->end_io_data;
- struct scsi_request *rq = scsi_req(req);
struct scsi_tape *STp = SRpnt->stp;
struct bio *tmp;
- STp->buffer->cmdstat.midlevel_result = SRpnt->result = rq->result;
- STp->buffer->cmdstat.residual = rq->resid_len;
+ STp->buffer->cmdstat.midlevel_result = SRpnt->result = scmd->result;
+ STp->buffer->cmdstat.residual = scmd->resid_len;
st_do_stats(STp, req);
tmp = SRpnt->bio;
- if (rq->sense_len)
- memcpy(SRpnt->sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
+ if (scmd->sense_len)
+ memcpy(SRpnt->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
if (SRpnt->waiting)
complete(SRpnt->waiting);
blk_rq_unmap_user(tmp);
blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
@@ -538,17 +541,17 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
int timeout, int retries)
{
struct request *req;
- struct scsi_request *rq;
struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;
int err = 0;
struct scsi_tape *STp = SRpnt->stp;
+ struct scsi_cmnd *scmd;
req = scsi_alloc_request(SRpnt->stp->device->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
- rq = scsi_req(req);
+ scmd = blk_mq_rq_to_pdu(req);
req->rq_flags |= RQF_QUIET;
mdata->null_mapped = 1;
@@ -574,14 +577,14 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
}
SRpnt->bio = req->bio;
- rq->cmd_len = COMMAND_SIZE(cmd[0]);
- memset(rq->cmd, 0, BLK_MAX_CDB);
- memcpy(rq->cmd, cmd, rq->cmd_len);
+ scmd->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(scmd->cmnd, cmd, scmd->cmd_len);
req->timeout = timeout;
- rq->retries = retries;
+ scmd->allowed = retries;
+ req->end_io = st_scsi_execute_end;
req->end_io_data = SRpnt;
- blk_execute_rq_nowait(NULL, req, 1, st_scsi_execute_end);
+ blk_execute_rq_nowait(req, true);
return 0;
}
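
With struct scsi_request gone, st builds passthrough commands directly in the scsi_cmnd that lives in the request's driver PDU, and completion callbacks now return enum rq_end_io_ret. The shape of the new submission path, condensed from the hunks above:

	req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	scmd = blk_mq_rq_to_pdu(req);		/* the scsi_cmnd is the PDU */
	scmd->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(scmd->cmnd, cmd, scmd->cmd_len);
	scmd->allowed = retries;

	req->timeout = timeout;
	req->end_io = st_scsi_execute_end;	/* returns RQ_END_IO_NONE */
	req->end_io_data = SRpnt;
	blk_execute_rq_nowait(req, true);	/* true == insert at head */
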
@@ -3829,7 +3832,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
break;
}
- retval = scsi_ioctl(STp->device, NULL, file->f_mode, cmd_in, p);
+ retval = scsi_ioctl(STp->device, file->f_mode, cmd_in, p);
if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
/* unload */
STp->rew_at_close = 0;
@@ -4245,11 +4248,10 @@ static int st_probe(struct device *dev)
struct st_partstat *STps;
struct st_buffer *buffer;
int i, error;
- char *stp;
if (SDp->type != TYPE_TAPE)
return -ENODEV;
- if ((stp = st_incompatible(SDp))) {
+ if (st_incompatible(SDp)) {
sdev_printk(KERN_INFO, SDp,
"OnStream tapes are no longer supported;\n");
sdev_printk(KERN_INFO, SDp,
@@ -4276,7 +4278,6 @@ static int st_probe(struct device *dev)
goto out_buffer_free;
}
kref_init(&tpnt->kref);
- tpnt->driver = &st_template;
tpnt->device = SDp;
if (SDp->scsi_level <= 2)
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index c0ef0d9aaf8a..7a68eaba7e81 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -117,7 +117,6 @@ struct scsi_tape_stats {
/* The tape drive descriptor */
struct scsi_tape {
- struct scsi_driver *driver;
struct scsi_device *device;
struct mutex lock; /* For serialization */
struct completion wait; /* For SCSI commands */
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index e6420f2127ce..8def242675ef 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -665,16 +665,17 @@ static int stex_queuecommand_lck(struct scsi_cmnd *cmd)
return 0;
case PASSTHRU_CMD:
if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
- struct st_drvver ver;
+ const struct st_drvver ver = {
+ .major = ST_VER_MAJOR,
+ .minor = ST_VER_MINOR,
+ .oem = ST_OEM,
+ .build = ST_BUILD_VER,
+ .signature[0] = PASSTHRU_SIGNATURE,
+ .console_id = host->max_id - 1,
+ .host_no = hba->host->host_no,
+ };
size_t cp_len = sizeof(ver);
- ver.major = ST_VER_MAJOR;
- ver.minor = ST_VER_MINOR;
- ver.oem = ST_OEM;
- ver.build = ST_BUILD_VER;
- ver.signature[0] = PASSTHRU_SIGNATURE;
- ver.console_id = host->max_id - 1;
- ver.host_no = hba->host->host_no;
cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
if (sizeof(ver) == cp_len)
cmd->result = DID_OK << 16;
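
The stex.c change relies on a C guarantee: in a brace-enclosed initializer, every member and array element not explicitly named is zero-initialized, so the whole structure can be built const in one step with no stale stack contents left behind before it is copied out in full. A reduced illustration, using the fields from the hunk above:

	const struct st_drvver ver = {
		.major        = ST_VER_MAJOR,
		.signature[0] = PASSTHRU_SIGNATURE,	/* rest of the array is 0 */
		/* all unnamed members are implicitly zeroed */
	};
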
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 20595c0ba0ae..bc46721aa01c 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -21,6 +21,8 @@
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -52,13 +54,15 @@
#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
(((MINOR_) & 0xff)))
-
#define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0)
#define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2)
#define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1)
#define VMSTOR_PROTO_VERSION_WIN8_1 VMSTOR_PROTO_VERSION(6, 0)
#define VMSTOR_PROTO_VERSION_WIN10 VMSTOR_PROTO_VERSION(6, 2)
+/* channel callback timeout in ms */
+#define CALLBACK_TIMEOUT 2
+
/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
VSTOR_OPERATION_COMPLETE_IO = 1,
@@ -134,21 +138,11 @@ struct hv_fc_wwn_packet {
*/
#define STORVSC_MAX_CMD_LEN 0x10
-#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14
-#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12
-
+/* Sense buffer size is the same for all versions since Windows 8 */
#define STORVSC_SENSE_BUFFER_SIZE 0x14
#define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14
/*
- * Sense buffer size changed in win8; have a run-time
- * variable to track the size we should use. This value will
- * likely change during protocol negotiation but it is valid
- * to start by assuming pre-Win8.
- */
-static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
-
-/*
* The storage protocol version is determined during the
* initial exchange with the host. It will indicate which
* storage functionality is available in the host.
@@ -175,18 +169,6 @@ do { \
dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \
} while (0)
-struct vmscsi_win8_extension {
- /*
- * The following were added in Windows 8
- */
- u16 reserve;
- u8 queue_tag;
- u8 queue_action;
- u32 srb_flags;
- u32 time_out_value;
- u32 queue_sort_ey;
-} __packed;
-
struct vmscsi_request {
u16 length;
u8 srb_status;
@@ -212,46 +194,23 @@ struct vmscsi_request {
/*
* The following was added in win8.
*/
- struct vmscsi_win8_extension win8_extension;
+ u16 reserve;
+ u8 queue_tag;
+ u8 queue_action;
+ u32 srb_flags;
+ u32 time_out_value;
+ u32 queue_sort_ey;
} __attribute((packed));
/*
- * The list of storage protocols in order of preference.
+ * The list of Windows versions in order of preference.
*/
-struct vmstor_protocol {
- int protocol_version;
- int sense_buffer_size;
- int vmscsi_size_delta;
-};
-
-static const struct vmstor_protocol vmstor_protocols[] = {
- {
+static const int protocol_version[] = {
VMSTOR_PROTO_VERSION_WIN10,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
VMSTOR_PROTO_VERSION_WIN8_1,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
VMSTOR_PROTO_VERSION_WIN8,
- POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
- 0
- },
- {
- VMSTOR_PROTO_VERSION_WIN7,
- PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
- sizeof(struct vmscsi_win8_extension),
- },
- {
- VMSTOR_PROTO_VERSION_WIN6,
- PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
- sizeof(struct vmscsi_win8_extension),
- }
};
@@ -407,9 +366,7 @@ static void storvsc_on_channel_callback(void *context);
#define STORVSC_IDE_MAX_CHANNELS 1
/*
- * Upper bound on the size of a storvsc packet. vmscsi_size_delta is not
- * included in the calculation because it is set after STORVSC_MAX_PKT_SIZE
- * is used in storvsc_connect_to_vsp
+ * Upper bound on the size of a storvsc packet.
*/
#define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\
sizeof(struct vstor_packet))
@@ -451,17 +408,6 @@ struct storvsc_device {
unsigned char target_id;
/*
- * The size of the vmscsi_request has changed in win8. The
- * additional size is because of new elements added to the
- * structure. These elements are valid only when we are talking
- * to a win8 host.
- * Track the correction to size we need to apply. This value
- * will likely change during protocol negotiation but it is
- * valid to start by assuming pre-Win8.
- */
- int vmscsi_size_delta;
-
- /*
* Max I/O, the device can support.
*/
u32 max_transfer_bytes;
@@ -536,7 +482,7 @@ static void storvsc_host_scan(struct work_struct *work)
host = host_device->host;
/*
* Before scanning the host, first check to see if any of the
- * currrently known devices have been hot removed. We issue a
+ * currently known devices have been hot removed. We issue a
* "unit ready" command against all currently known devices.
* This I/O will result in an error for devices that have been
* removed. As part of handling the I/O error, we remove the device.
@@ -793,8 +739,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
vstor_packet->sub_channel_count = num_sc;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
VMBUS_RQST_INIT,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -862,8 +807,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
VMBUS_RQST_INIT,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -913,14 +857,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
* Query host supported protocol version.
*/
- for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
+ for (i = 0; i < ARRAY_SIZE(protocol_version); i++) {
/* reuse the packet for version range supported */
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation =
VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
- vstor_packet->version.major_minor =
- vmstor_protocols[i].protocol_version;
+ vstor_packet->version.major_minor = protocol_version[i];
/*
* The revision number is only used in Windows; set it to 0.
@@ -934,21 +877,16 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
return -EINVAL;
if (vstor_packet->status == 0) {
- vmstor_proto_version =
- vmstor_protocols[i].protocol_version;
-
- sense_buffer_size =
- vmstor_protocols[i].sense_buffer_size;
-
- stor_device->vmscsi_size_delta =
- vmstor_protocols[i].vmscsi_size_delta;
+ vmstor_proto_version = protocol_version[i];
break;
}
}
- if (vstor_packet->status != 0)
+ if (vstor_packet->status != 0) {
+ dev_err(&device->device, "Obsolete Hyper-V version\n");
return -EINVAL;
+ }
memset(vstor_packet, 0, sizeof(struct vstor_packet));
@@ -984,11 +922,10 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
cpumask_set_cpu(device->channel->target_cpu,
&stor_device->alloced_cpus);
- if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
- if (vstor_packet->storage_channel_properties.flags &
- STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
- process_sub_channels = true;
- }
+ if (vstor_packet->storage_channel_properties.flags &
+ STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
+ process_sub_channels = true;
+
stor_device->max_transfer_bytes =
vstor_packet->storage_channel_properties.max_transfer_bytes;
@@ -1095,7 +1032,7 @@ do_work:
*/
wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
if (!wrk) {
- set_host_byte(scmnd, DID_TARGET_FAILURE);
+ set_host_byte(scmnd, DID_BAD_TARGET);
return;
}
@@ -1195,7 +1132,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
* Copy over the sense_info_length, but limit to the known max
* size if Hyper-V returns a bad value.
*/
- stor_pkt->vm_srb.sense_info_length = min_t(u8, sense_buffer_size,
+ stor_pkt->vm_srb.sense_info_length = min_t(u8, STORVSC_SENSE_BUFFER_SIZE,
vstor_packet->vm_srb.sense_info_length);
if (vstor_packet->vm_srb.scsi_status != 0 ||
@@ -1270,6 +1207,7 @@ static void storvsc_on_channel_callback(void *context)
struct hv_device *device;
struct storvsc_device *stor_device;
struct Scsi_Host *shost;
+ unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT);
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1287,8 +1225,13 @@ static void storvsc_on_channel_callback(void *context)
struct storvsc_cmd_request *request = NULL;
u32 pktlen = hv_pkt_datalen(desc);
u64 rqst_id = desc->trans_id;
- u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
+ u32 minlen = rqst_id ? sizeof(struct vstor_packet) :
+ sizeof(enum vstor_packet_operation);
+
+ if (unlikely(time_after(jiffies, time_limit))) {
+ hv_pkt_iter_close(channel);
+ return;
+ }
if (pktlen < minlen) {
dev_err(&device->device,
@@ -1336,6 +1279,7 @@ static void storvsc_on_channel_callback(void *context)
continue;
}
request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd);
+ scsi_dma_unmap(scmnd);
}
storvsc_on_receive(stor_device, packet, request);
@@ -1343,7 +1287,7 @@ static void storvsc_on_channel_callback(void *context)
}
memcpy(&request->vstor_packet, packet,
- (sizeof(struct vstor_packet) - stor_device->vmscsi_size_delta));
+ sizeof(struct vstor_packet));
complete(&request->wait_event);
}
}
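
storvsc_on_channel_callback() is now time-bounded: it records a deadline on entry and, once CALLBACK_TIMEOUT (2 ms) has elapsed, closes the packet iterator so the rest of the ring is drained by a subsequent callback instead of monopolizing the CPU. The control shape, condensed:

	unsigned long time_limit = jiffies + msecs_to_jiffies(CALLBACK_TIMEOUT);

	foreach_vmbus_pkt(desc, channel) {
		if (unlikely(time_after(jiffies, time_limit))) {
			hv_pkt_iter_close(channel);	/* resume in a later callback */
			return;
		}
		/* ... validate and complete the request ... */
	}
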
@@ -1554,11 +1498,10 @@ static int storvsc_do_io(struct hv_device *device,
found_channel:
vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
- vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
- stor_device->vmscsi_size_delta);
+ vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
- vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
+ vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;
vstor_packet->vm_srb.data_transfer_length =
@@ -1571,13 +1514,11 @@ found_channel:
ret = vmbus_sendpacket_mpb_desc(outgoing_channel,
request->payload, request->payload_sz,
vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
(unsigned long)request);
} else {
ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1681,8 +1622,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
vstor_packet->vm_srb.path_id = stor_device->path_id;
ret = vmbus_sendpacket(device->channel, vstor_packet,
- (sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta),
+ sizeof(struct vstor_packet),
VMBUS_RQST_RESET,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1749,9 +1689,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
- int i;
struct scatterlist *sgl;
- unsigned int sg_count;
struct vmscsi_request *vm_srb;
struct vmbus_packet_mpb_array *payload;
u32 payload_sz;
@@ -1777,31 +1715,31 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet));
vm_srb = &cmd_request->vstor_packet.vm_srb;
- vm_srb->win8_extension.time_out_value = 60;
+ vm_srb->time_out_value = 60;
- vm_srb->win8_extension.srb_flags |=
+ vm_srb->srb_flags |=
SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
if (scmnd->device->tagged_supported) {
- vm_srb->win8_extension.srb_flags |=
+ vm_srb->srb_flags |=
(SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
- vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
- vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
+ vm_srb->queue_tag = SP_UNTAGGED;
+ vm_srb->queue_action = SRB_SIMPLE_TAG_REQUEST;
}
/* Build the SRB */
switch (scmnd->sc_data_direction) {
case DMA_TO_DEVICE:
vm_srb->data_in = WRITE_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
+ vm_srb->srb_flags |= SRB_FLAGS_DATA_OUT;
break;
case DMA_FROM_DEVICE:
vm_srb->data_in = READ_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
+ vm_srb->srb_flags |= SRB_FLAGS_DATA_IN;
break;
case DMA_NONE:
vm_srb->data_in = UNKNOWN_TYPE;
- vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
+ vm_srb->srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
break;
default:
/*
@@ -1824,17 +1762,17 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
sgl = (struct scatterlist *)scsi_sglist(scmnd);
- sg_count = scsi_sg_count(scmnd);
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
payload_sz = sizeof(cmd_request->mpb);
- if (sg_count) {
- unsigned int hvpgoff, hvpfns_to_add;
+ if (scsi_sg_count(scmnd)) {
unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
- u64 hvpfn;
+ struct scatterlist *sg;
+ unsigned long hvpfn, hvpfns_to_add;
+ int j, i = 0, sg_count;
if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
@@ -1848,21 +1786,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
payload->range.len = length;
payload->range.offset = offset_in_hvpg;
+ sg_count = scsi_dma_map(scmnd);
+ if (sg_count < 0) {
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto err_free_payload;
+ }
- for (i = 0; sgl != NULL; sgl = sg_next(sgl)) {
+ for_each_sg(sgl, sg, sg_count, j) {
/*
- * Init values for the current sgl entry. hvpgoff
- * and hvpfns_to_add are in units of Hyper-V size
- * pages. Handling the PAGE_SIZE != HV_HYP_PAGE_SIZE
- * case also handles values of sgl->offset that are
- * larger than PAGE_SIZE. Such offsets are handled
- * even on other than the first sgl entry, provided
- * they are a multiple of PAGE_SIZE.
+ * Init values for the current sgl entry. hvpfns_to_add
+ * is in units of Hyper-V size pages. Handling the
+ * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles
+ * values of sgl->offset that are larger than PAGE_SIZE.
+ * Such offsets are handled even on other than the first
+ * sgl entry, provided they are a multiple of PAGE_SIZE.
*/
- hvpgoff = HVPFN_DOWN(sgl->offset);
- hvpfn = page_to_hvpfn(sg_page(sgl)) + hvpgoff;
- hvpfns_to_add = HVPFN_UP(sgl->offset + sgl->length) -
- hvpgoff;
+ hvpfn = HVPFN_DOWN(sg_dma_address(sg));
+ hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) +
+ sg_dma_len(sg)) - hvpfn;
/*
* Fill the next portion of the PFN array with
@@ -1872,7 +1813,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
* the PFN array is filled.
*/
while (hvpfns_to_add--)
- payload->range.pfn_array[i++] = hvpfn++;
+ payload->range.pfn_array[i++] = hvpfn++;
}
}
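
After scsi_dma_map(), each scatterlist entry carries a DMA address, and the PFN range is computed in Hyper-V page units from that address rather than from the page/offset pair. A worked example with assumed values (HV_HYP_PAGE_SIZE of 4 KiB):

	/* assume sg_dma_address(sg) = 0x12345800, sg_dma_len(sg) = 0x2000 */
	hvpfn         = HVPFN_DOWN(0x12345800);			/* 0x12345 */
	hvpfns_to_add = HVPFN_UP(0x12345800 + 0x2000) - hvpfn;	/* 0x12348 - 0x12345 = 3 */

The 8 KiB buffer starts mid-page, so it straddles three Hyper-V pages and contributes three PFN array entries.
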
@@ -1884,13 +1825,18 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
put_cpu();
if (ret == -EAGAIN) {
- if (payload_sz > sizeof(cmd_request->mpb))
- kfree(payload);
/* no more space */
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto err_free_payload;
}
return 0;
+
+err_free_payload:
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
+
+ return ret;
}
static struct scsi_host_template scsi_driver = {
@@ -1907,7 +1853,7 @@ static struct scsi_host_template scsi_driver = {
.cmd_per_lun = 2048,
.this_id = -1,
/* Ensure there are no gaps in presented sgls */
- .virt_boundary_mask = PAGE_SIZE-1,
+ .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1,
.no_write_same = 1,
.track_queue_depth = 1,
.change_queue_depth = storvsc_change_queue_depth,
@@ -1957,34 +1903,17 @@ static int storvsc_probe(struct hv_device *device,
bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false);
int target = 0;
struct storvsc_device *stor_device;
- int max_luns_per_target;
- int max_targets;
- int max_channels;
int max_sub_channels = 0;
+ u32 max_xfer_bytes;
/*
- * Based on the windows host we are running on,
- * set state to properly communicate with the host.
+ * We support sub-channels for storage on SCSI and FC controllers.
+	 * The number of sub-channels offered is based on the number of
+ * VCPUs in the guest.
*/
-
- if (vmbus_proto_version < VERSION_WIN8) {
- max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
- max_targets = STORVSC_IDE_MAX_TARGETS;
- max_channels = STORVSC_IDE_MAX_CHANNELS;
- } else {
- max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
- max_targets = STORVSC_MAX_TARGETS;
- max_channels = STORVSC_MAX_CHANNELS;
- /*
- * On Windows8 and above, we support sub-channels for storage
- * on SCSI and FC controllers.
- * The number of sub-channels offerred is based on the number of
- * VCPUs in the guest.
- */
- if (!dev_is_ide)
- max_sub_channels =
- (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
- }
+ if (!dev_is_ide)
+ max_sub_channels =
+ (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
scsi_driver.can_queue = max_outstanding_req_per_channel *
(max_sub_channels + 1) *
@@ -2013,9 +1942,9 @@ static int storvsc_probe(struct hv_device *device,
init_waitqueue_head(&stor_device->waiting_to_drain);
stor_device->device = device;
stor_device->host = host;
- stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
spin_lock_init(&stor_device->lock);
hv_set_drvdata(device, stor_device);
+ dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
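
The dma_set_min_align_mask() call matters once swiotlb bounce buffering is in play (for example in confidential VMs): it instructs the DMA layer to preserve a buffer's offset within an HV_HYP_PAGE_SIZE block when it substitutes a bounce buffer, so the intra-page offset assumed by the PFN arithmetic in storvsc_queuecommand() survives the mapping:

	/*
	 * With the mask set, sg_dma_address(sg) keeps the same offset
	 * within an HV_HYP_PAGE_SIZE block as the original buffer.
	 */
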
@@ -2036,9 +1965,9 @@ static int storvsc_probe(struct hv_device *device,
break;
case SCSI_GUID:
- host->max_lun = max_luns_per_target;
- host->max_id = max_targets;
- host->max_channel = max_channels - 1;
+ host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
+ host->max_id = STORVSC_MAX_TARGETS;
+ host->max_channel = STORVSC_MAX_CHANNELS - 1;
break;
default:
@@ -2049,12 +1978,28 @@ static int storvsc_probe(struct hv_device *device,
}
/* max cmd length */
host->max_cmd_len = STORVSC_MAX_CMD_LEN;
-
/*
- * set the table size based on the info we got
- * from the host.
+	 * Any reasonable Hyper-V configuration should provide a
+	 * max_transfer_bytes value that is a multiple of HV_HYP_PAGE_SIZE;
+	 * round it down to guard against any odd value.
*/
- host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
+ /* max_hw_sectors_kb */
+ host->max_sectors = max_xfer_bytes >> 9;
+ /*
+ * There are 2 requirements for Hyper-V storvsc sgl segments,
+ * based on which the below calculation for max segments is
+ * done:
+ *
+	 * 1. Except for the first and last sgl segment, all sgl segments
+	 *    should be aligned to HV_HYP_PAGE_SIZE, which also means the
+	 *    maximum number of segments in an sgl can be calculated by
+	 *    dividing the total max transfer length by HV_HYP_PAGE_SIZE.
+ *
+ * 2. Except for the first and last, each entry in the SGL must
+ * have an offset that is a multiple of HV_HYP_PAGE_SIZE.
+ */
+ host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
/*
* For non-IDE disks, the host supports multiple channels.
* Set the number of HW queues we are supporting.
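
To make the sizing concrete (example numbers assumed, not host-reported): if the host advertises a max_transfer_bytes of 512 KiB, then

	max_xfer_bytes     = round_down(524288, 4096);	/* 524288, already aligned */
	host->max_sectors  = 524288 >> 9;		/* 1024 512-byte sectors */
	host->sg_tablesize = (524288 >> 12) + 1;	/* 128 + 1 = 129 segments */

The extra segment accounts for a transfer that starts mid-page and therefore spans one more Hyper-V page than the plain division suggests.
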
@@ -2076,7 +2021,7 @@ static int storvsc_probe(struct hv_device *device,
*/
host_dev->handle_error_wq =
alloc_ordered_workqueue("storvsc_error_wq_%d",
- WQ_MEM_RECLAIM,
+ 0,
host->host_no);
if (!host_dev->handle_error_wq) {
ret = -ENOMEM;
@@ -2123,7 +2068,7 @@ err_out3:
err_out2:
/*
* Once we have connected with the host, we would need to
- * to invoke storvsc_dev_remove() to rollback this state and
+	 * invoke storvsc_dev_remove() to roll back this state and
* this call also frees up the stor_device; hence the jump around
* err_out1 label.
*/
@@ -2225,10 +2170,6 @@ static int __init storvsc_drv_init(void)
* than the ring buffer size since that page is reserved for
* the ring buffer indices) by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
- *
- * The computation underestimates max_outstanding_req_per_channel
- * for Win7 and older hosts because it does not take into account
- * the vmscsi_size_delta correction to the max request size.
*/
max_outstanding_req_per_channel =
((storvsc_ringbuffer_size - PAGE_SIZE) /
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index f7f724a3ff1d..abf229b847a1 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -334,7 +334,7 @@ static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata)
static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
struct scsi_cmnd *cmd)
{
- int wanted_len = cmd->SCp.this_residual;
+ int wanted_len = NCR5380_to_ncmd(cmd)->this_residual;
if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
return 0;
@@ -505,7 +505,7 @@ static struct scsi_host_template sun3_scsi_template = {
.sg_tablesize = 1,
.cmd_per_lun = 2,
.dma_boundary = PAGE_SIZE - 1,
- .cmd_size = NCR5380_CMD_SIZE,
+ .cmd_size = sizeof(struct NCR5380_cmd),
};
static int __init sun3_scsi_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index b04bfde65e3f..2e2852bd5860 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -118,7 +118,7 @@ struct sym_ucmd { /* Override the SCSI pointer structure */
struct completion *eh_done; /* SCSI error handling */
};
-#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp))
+#define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)scsi_cmd_priv(cmd))
#define SYM_SOFTC_PTR(cmd) sym_get_hcb(cmd->device->host)
/*
@@ -127,7 +127,6 @@ struct sym_ucmd { /* Override the SCSI pointer structure */
void sym_xpt_done(struct sym_hcb *np, struct scsi_cmnd *cmd)
{
struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd);
- BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd));
if (ucmd->eh_done)
complete(ucmd->eh_done);
@@ -1630,6 +1629,7 @@ static struct scsi_host_template sym2_template = {
.module = THIS_MODULE,
.name = "sym53c8xx",
.info = sym53c8xx_info,
+ .cmd_size = sizeof(struct sym_ucmd),
.queuecommand = sym53c8xx_queue_command,
.slave_alloc = sym53c8xx_slave_alloc,
.slave_configure = sym53c8xx_slave_configure,
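
sym53c8xx moves its per-command state out of the legacy scsi_pointer and into driver-private command data: the host template declares cmd_size, the midlayer then allocates that many extra bytes behind every scsi_cmnd, and scsi_cmd_priv() returns a pointer to them. The pattern in miniature (to_ucmd is an illustrative helper; the driver's actual accessor is the SYM_UCMD_PTR macro):

	static struct scsi_host_template tmpl = {
		/* ... */
		.cmd_size = sizeof(struct sym_ucmd),	/* per-command private data */
	};

	static inline struct sym_ucmd *to_ucmd(struct scsi_cmnd *cmd)
	{
		return scsi_cmd_priv(cmd);	/* memory just past the scsi_cmnd */
	}
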
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 255a2d48d421..f0db17e34ea0 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3598,7 +3598,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num)
}
/*
- * Gerard's alchemy:) that deals with with the data
+ * Gerard's alchemy:) that deals with the data
* pointer for both MDP and the residual calculation.
*
* I didn't want to bloat the code by more than 200
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
deleted file mode 100644
index b2521b830be7..000000000000
--- a/drivers/scsi/ufs/Kconfig
+++ /dev/null
@@ -1,210 +0,0 @@
-#
-# Kernel configuration file for the UFS Host Controller
-#
-# This code is based on drivers/scsi/ufs/Kconfig
-# Copyright (C) 2011-2013 Samsung India Software Operations
-#
-# Authors:
-# Santosh Yaraganavi <santosh.sy@samsung.com>
-# Vinayak Holikatti <h.vinayak@samsung.com>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-# See the COPYING file in the top-level directory or visit
-# <http://www.gnu.org/licenses/gpl-2.0.html>
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# This program is provided "AS IS" and "WITH ALL FAULTS" and
-# without warranty of any kind. You are solely responsible for
-# determining the appropriateness of using and distributing
-# the program and assume all risks associated with your exercise
-# of rights with respect to the program, including but not limited
-# to infringement of third party rights, the risks and costs of
-# program errors, damage to or loss of data, programs or equipment,
-# and unavailability or interruption of operations. Under no
-# circumstances will the contributor of this Program be liable for
-# any damages of any kind arising from your use or distribution of
-# this program.
-
-config SCSI_UFSHCD
- tristate "Universal Flash Storage Controller Driver Core"
- depends on SCSI && SCSI_DMA
- select PM_DEVFREQ
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
- select NLS
- help
- This selects the support for UFS devices in Linux, say Y and make
- sure that you know the name of your UFS host adapter (the card
- inside your computer that "speaks" the UFS protocol, also
- called UFS Host Controller), because you will be asked for it.
- The module will be called ufshcd.
-
- To compile this driver as a module, choose M here and read
- <file:Documentation/scsi/ufs.rst>.
- However, do not compile this as a module if your root file system
- (the one containing the directory /) is located on a UFS device.
-
-config SCSI_UFSHCD_PCI
- tristate "PCI bus based UFS Controller support"
- depends on SCSI_UFSHCD && PCI
- help
- This selects the PCI UFS Host Controller Interface. Select this if
- you have UFS Host Controller with PCI Interface.
-
- If you have a controller with this interface, say Y or M here.
-
- If unsure, say N.
-
-config SCSI_UFS_DWC_TC_PCI
- tristate "DesignWare pci support using a G210 Test Chip"
- depends on SCSI_UFSHCD_PCI
- help
- Synopsys Test Chip is a PHY for prototyping purposes.
-
- If unsure, say N.
-
-config SCSI_UFSHCD_PLATFORM
- tristate "Platform bus based UFS Controller support"
- depends on SCSI_UFSHCD
- depends on HAS_IOMEM
- help
- This selects the UFS host controller support. Select this if
- you have an UFS controller on Platform bus.
-
- If you have a controller with this interface, say Y or M here.
-
- If unsure, say N.
-
-config SCSI_UFS_CDNS_PLATFORM
- tristate "Cadence UFS Controller platform driver"
- depends on SCSI_UFSHCD_PLATFORM
- help
- This selects the Cadence-specific additions to UFSHCD platform driver.
-
- If unsure, say N.
-
-config SCSI_UFS_DWC_TC_PLATFORM
- tristate "DesignWare platform support using a G210 Test Chip"
- depends on SCSI_UFSHCD_PLATFORM
- help
- The Synopsys Test Chip is a PHY for prototyping purposes.
-
- If unsure, say N.
-
-config SCSI_UFS_QCOM
- tristate "QCOM specific hooks to UFS controller platform driver"
- depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
- select QCOM_SCM if SCSI_UFS_CRYPTO
- select RESET_CONTROLLER
- help
- This selects the QCOM specific additions to the UFSHCD platform
- driver. The UFS host on QCOM chipsets needs some vendor specific
- configuration before accessing the hardware; this includes PHY
- configuration and vendor specific registers.
-
- Select this if you have a UFS controller on a QCOM chipset.
- If unsure, say N.
-
-config SCSI_UFS_MEDIATEK
- tristate "Mediatek specific hooks to UFS controller platform driver"
- depends on SCSI_UFSHCD_PLATFORM && ARCH_MEDIATEK
- select PHY_MTK_UFS
- select RESET_TI_SYSCON
- help
- This selects the Mediatek specific additions to the UFSHCD platform
- driver. The UFS host on Mediatek chipsets needs some vendor specific
- configuration before accessing the hardware; this includes PHY
- configuration and vendor specific registers.
-
- Select this if you have a UFS controller on a Mediatek chipset.
-
- If unsure, say N.
-
-config SCSI_UFS_HISI
- tristate "Hisilicon specific hooks to UFS controller platform driver"
- depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM
- help
- This selects the Hisilicon specific additions to the UFSHCD platform driver.
-
- Select this if you have a UFS controller on a Hisilicon chipset.
- If unsure, say N.
-
-config SCSI_UFS_TI_J721E
- tristate "TI glue layer for Cadence UFS Controller"
- depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST)
- help
- This selects the driver for the TI glue layer for the Cadence UFS
- Host Controller IP.
-
- Select this if you have a TI platform with a UFS controller.
- If unsure, say N.
-
-config SCSI_UFS_BSG
- bool "Universal Flash Storage BSG device node"
- depends on SCSI_UFSHCD
- select BLK_DEV_BSGLIB
- help
- Universal Flash Storage (UFS) is a SCSI transport specification for
- accessing flash storage on digital cameras, mobile phones and
- consumer electronic devices.
- A UFS controller communicates with a UFS device by exchanging
- UFS Protocol Information Units (UPIUs).
- UPIUs can not only be used as a transport layer for the SCSI protocol
- but are also used by the UFS native command set.
- This transport driver supports exchanging UFS protocol information units
- with a UFS device. See also the ufshcd driver, which is a SCSI driver
- that supports UFS devices.
-
- Select this if you need a bsg device node for your UFS controller.
- If unsure, say N.
-
-config SCSI_UFS_EXYNOS
- tristate "Exynos specific hooks to UFS controller platform driver"
- depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
- help
- This selects the Samsung Exynos SoC specific additions to the UFSHCD
- platform driver. The UFS host on Samsung Exynos SoCs includes the HCI
- and UNIPRO layers and works together with the UFS-PHY driver.
-
- Select this if you have a UFS host controller on a Samsung Exynos SoC.
- If unsure, say N.
-
-config SCSI_UFS_CRYPTO
- bool "UFS Crypto Engine Support"
- depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION
- help
- Enable Crypto Engine Support in UFS.
- Enabling this makes it possible for the kernel to use the crypto
- capabilities of the UFS device (if present) to perform crypto
- operations on data being transferred to/from the device.
-
-config SCSI_UFS_HPB
- bool "Support UFS Host Performance Booster"
- depends on SCSI_UFSHCD
- help
- The UFS HPB feature improves random read performance. It caches the
- device's L2P (logical-to-physical) map in host DRAM. The driver issues
- the HPB READ command with the physical page number piggybacked on it,
- bypassing the L2P address translation in the FTL (flash translation
- layer).
-
-config SCSI_UFS_FAULT_INJECTION
- bool "UFS Fault Injection Support"
- depends on SCSI_UFSHCD && FAULT_INJECTION
- help
- Enable fault injection support in the UFS driver. This makes it easier
- to test the UFS error handler and abort handler.
-
-config SCSI_UFS_HWMON
- bool "UFS Temperature Notification"
- depends on SCSI_UFSHCD=HWMON || HWMON=y
- help
- This provides support for UFS hardware monitoring. If enabled,
- a hardware monitoring device will be created for the UFS device.
-
- If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
deleted file mode 100644
index 966048875b50..000000000000
--- a/drivers/scsi/ufs/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# UFSHCD makefile
-
-# The link order is important here. ufshcd-core must initialize
-# before vendor drivers.
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
-ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
-ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
-ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
-ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
-
-obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
-obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
-obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
-obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o
-ufs_qcom-y += ufs-qcom.o
-ufs_qcom-$(CONFIG_SCSI_UFS_CRYPTO) += ufs-qcom-ice.o
-obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o
-obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
-obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
-obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
-obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
-obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
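
A side note on the ordering constraint above, hedged since the initcall levels are not spelled out in the Makefile itself: for built-in objects, module_init() becomes a device-level initcall, and initcalls at the same level run in link order.

/*
 * Sketch of the mechanism: with the objects built in (obj-y), the
 * initcall emitted for ufshcd-core.o is linked ahead of those of the
 * vendor objects, so the core registers its infrastructure before any
 * vendor glue driver tries to use it. Reordering the lines above
 * would break that guarantee.
 */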
diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c
deleted file mode 100644
index 7da8be2f35c4..000000000000
--- a/drivers/scsi/ufs/cdns-pltfrm.c
+++ /dev/null
@@ -1,343 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Platform UFS Host driver for Cadence controller
- *
- * Copyright (C) 2018 Cadence Design Systems, Inc.
- *
- * Authors:
- * Jan Kotas <jank@cadence.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/time.h>
-
-#include "ufshcd-pltfrm.h"
-
-#define CDNS_UFS_REG_HCLKDIV 0xFC
-#define CDNS_UFS_REG_PHY_XCFGD1 0x113C
-#define CDNS_UFS_MAX_L4_ATTRS 12
-
-struct cdns_ufs_host {
- /* cdns_ufs_dme_attr_val - for storing L4 attributes */
- u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
-};
-
-/**
- * cdns_ufs_get_l4_attr - get L4 attributes on local side
- * @hba: per adapter instance
- *
- */
-static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
-{
- struct cdns_ufs_host *host = ufshcd_get_variant(hba);
-
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
- &host->cdns_ufs_dme_attr_val[0]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
- &host->cdns_ufs_dme_attr_val[1]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
- &host->cdns_ufs_dme_attr_val[2]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
- &host->cdns_ufs_dme_attr_val[3]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
- &host->cdns_ufs_dme_attr_val[4]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
- &host->cdns_ufs_dme_attr_val[5]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
- &host->cdns_ufs_dme_attr_val[6]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
- &host->cdns_ufs_dme_attr_val[7]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
- &host->cdns_ufs_dme_attr_val[8]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
- &host->cdns_ufs_dme_attr_val[9]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
- &host->cdns_ufs_dme_attr_val[10]);
- ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
- &host->cdns_ufs_dme_attr_val[11]);
-}
-
-/**
- * cdns_ufs_set_l4_attr - set L4 attributes on local side
- * @hba: per adapter instance
- *
- */
-static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
-{
- struct cdns_ufs_host *host = ufshcd_get_variant(hba);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
- host->cdns_ufs_dme_attr_val[0]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
- host->cdns_ufs_dme_attr_val[1]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
- host->cdns_ufs_dme_attr_val[2]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
- host->cdns_ufs_dme_attr_val[3]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
- host->cdns_ufs_dme_attr_val[4]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
- host->cdns_ufs_dme_attr_val[5]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
- host->cdns_ufs_dme_attr_val[6]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
- host->cdns_ufs_dme_attr_val[7]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
- host->cdns_ufs_dme_attr_val[8]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
- host->cdns_ufs_dme_attr_val[9]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
- host->cdns_ufs_dme_attr_val[10]);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
- host->cdns_ufs_dme_attr_val[11]);
-}
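
One detail of the restore sequence above is easy to miss and worth a note:

/*
 * T_CONNECTIONSTATE is written twice on purpose: it is first forced
 * to 0 so that the remaining CPort attributes become writable, and
 * only restored to its saved value as the final step.
 */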
-
-/**
- * cdns_ufs_set_hclkdiv()
- * Sets HCLKDIV register value based on the core_clk
- * @hba: host controller instance
- *
- * Return zero for success and non-zero for failure
- */
-static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
-{
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
- unsigned long core_clk_rate = 0;
- u32 core_clk_div = 0;
-
- if (list_empty(head))
- return 0;
-
- list_for_each_entry(clki, head, list) {
- if (IS_ERR_OR_NULL(clki->clk))
- continue;
- if (!strcmp(clki->name, "core_clk"))
- core_clk_rate = clk_get_rate(clki->clk);
- }
-
- if (!core_clk_rate) {
- dev_err(hba->dev, "%s: unable to find core_clk rate\n",
- __func__);
- return -EINVAL;
- }
-
- core_clk_div = core_clk_rate / USEC_PER_SEC;
-
- ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV);
- /*
- * Make sure the register was updated; the UniPro layer will not
- * work with an incorrect value.
- */
- mb();
-
- return 0;
-}
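
A worked sketch of the divider math above; the helper name and the 200 MHz rate are illustrative assumptions, not taken from this file:

static inline u32 cdns_hclkdiv_sketch(unsigned long core_clk_rate)
{
	/* HCLKDIV counts core_clk cycles per microsecond, so a
	 * 200 MHz core_clk yields 200000000 / 1000000 = 200. */
	return core_clk_rate / USEC_PER_SEC;
}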
-
-/**
- * cdns_ufs_hce_enable_notify()
- * Called before and after HCE enable bit is set.
- * @hba: host controller instance
- * @status: notify stage (pre, post change)
- *
- * Return zero for success and non-zero for failure
- */
-static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- if (status != PRE_CHANGE)
- return 0;
-
- return cdns_ufs_set_hclkdiv(hba);
-}
-
-/**
- * cdns_ufs_hibern8_notify()
- * Called around hibern8 enter/exit.
- * @hba: host controller instance
- * @cmd: UIC Command
- * @status: notify stage (pre, post change)
- *
- */
-static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
- enum ufs_notify_change_status status)
-{
- if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
- cdns_ufs_get_l4_attr(hba);
- if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
- cdns_ufs_set_l4_attr(hba);
-}
-
-/**
- * cdns_ufs_link_startup_notify()
- * Called before and after Link startup is carried out.
- * @hba: host controller instance
- * @status: notify stage (pre, post change)
- *
- * Return zero for success and non-zero for failure
- */
-static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- if (status != PRE_CHANGE)
- return 0;
-
- /*
- * Some UFS devices have issues if LCC is enabled.
- * So we are setting PA_Local_TX_LCC_Enable to 0
- * before link startup which will make sure that both host
- * and device TX LCC are disabled once link startup is
- * completed.
- */
- ufshcd_disable_host_tx_lcc(hba);
-
- /*
- * Disabling Autohibern8 feature in cadence UFS
- * to mask unexpected interrupt trigger.
- */
- hba->ahit = 0;
-
- return 0;
-}
-
-/**
- * cdns_ufs_init - performs additional ufs initialization
- * @hba: host controller instance
- *
- * Returns status of initialization
- */
-static int cdns_ufs_init(struct ufs_hba *hba)
-{
- int status = 0;
- struct cdns_ufs_host *host;
- struct device *dev = hba->dev;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
-
- if (!host)
- return -ENOMEM;
- ufshcd_set_variant(hba, host);
-
- status = ufshcd_vops_phy_initialization(hba);
-
- return status;
-}
-
-/**
- * cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
- * @hba: host controller instance
- *
- * Always returns 0
- */
-static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
-{
- u32 data;
-
- /* Increase RX_Advanced_Min_ActivateTime_Capability */
- data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1);
- data |= BIT(24);
- ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1);
-
- return 0;
-}
-
-static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
- .name = "cdns-ufs-pltfm",
- .init = cdns_ufs_init,
- .hce_enable_notify = cdns_ufs_hce_enable_notify,
- .link_startup_notify = cdns_ufs_link_startup_notify,
- .hibern8_notify = cdns_ufs_hibern8_notify,
-};
-
-static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
- .name = "cdns-ufs-pltfm",
- .init = cdns_ufs_init,
- .hce_enable_notify = cdns_ufs_hce_enable_notify,
- .link_startup_notify = cdns_ufs_link_startup_notify,
- .phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
- .hibern8_notify = cdns_ufs_hibern8_notify,
-};
-
-static const struct of_device_id cdns_ufs_of_match[] = {
- {
- .compatible = "cdns,ufshc",
- .data = &cdns_ufs_pltfm_hba_vops,
- },
- {
- .compatible = "cdns,ufshc-m31-16nm",
- .data = &cdns_ufs_m31_16nm_pltfm_hba_vops,
- },
- { },
-};
-
-MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
-
-/**
- * cdns_ufs_pltfrm_probe - probe routine of the driver
- * @pdev: pointer to platform device handle
- *
- * Return zero for success and non-zero for failure
- */
-static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
-{
- int err;
- const struct of_device_id *of_id;
- struct ufs_hba_variant_ops *vops;
- struct device *dev = &pdev->dev;
-
- of_id = of_match_node(cdns_ufs_of_match, dev->of_node);
- vops = (struct ufs_hba_variant_ops *)of_id->data;
-
- /* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, vops);
- if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
-
- return err;
-}
-
-/**
- * cdns_ufs_pltfrm_remove - removes the ufs driver
- * @pdev: pointer to platform device handle
- *
- * Always returns 0
- */
-static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- ufshcd_remove(hba);
- return 0;
-}
-
-static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
- .prepare = ufshcd_suspend_prepare,
- .complete = ufshcd_resume_complete,
-};
-
-static struct platform_driver cdns_ufs_pltfrm_driver = {
- .probe = cdns_ufs_pltfrm_probe,
- .remove = cdns_ufs_pltfrm_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "cdns-ufshcd",
- .pm = &cdns_ufs_dev_pm_ops,
- .of_match_table = cdns_ufs_of_match,
- },
-};
-
-module_platform_driver(cdns_ufs_pltfrm_driver);
-
-MODULE_AUTHOR("Jan Kotas <jank@cadence.com>");
-MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
deleted file mode 100644
index 679289e1a78e..000000000000
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Synopsys G210 Test Chip driver
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#include "ufshcd.h"
-#include "ufshcd-dwc.h"
-#include "tc-dwc-g210.h"
-
-#include <linux/pci.h>
-#include <linux/pm_runtime.h>
-
-/* Test Chip type expected values */
-#define TC_G210_20BIT 20
-#define TC_G210_40BIT 40
-#define TC_G210_INV 0
-
-static int tc_type = TC_G210_INV;
-module_param(tc_type, int, 0);
-MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
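
Usage sketch, assuming the module is loaded by name from the command line:

/*
 * The chip width must be supplied at load time, e.g.
 *   modprobe tc-dwc-g210-pci tc_type=40
 * otherwise tc_type stays TC_G210_INV and probe() below returns
 * -EPERM before touching the hardware.
 */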
-
-/*
- * struct ufs_hba_dwc_vops - UFS DWC specific variant operations
- */
-static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = {
- .name = "tc-dwc-g210-pci",
- .link_startup_notify = ufshcd_dwc_link_startup_notify,
-};
-
-/**
- * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev)
-{
- ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-/**
- * tc_dwc_g210_pci_remove - de-allocate the PCI/SCSI host and free its
- * data structure memory
- * @pdev: pointer to PCI handle
- */
-static void tc_dwc_g210_pci_remove(struct pci_dev *pdev)
-{
- struct ufs_hba *hba = pci_get_drvdata(pdev);
-
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
- ufshcd_remove(hba);
-}
-
-/**
- * tc_dwc_g210_pci_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int
-tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct ufs_hba *hba;
- void __iomem *mmio_base;
- int err;
-
- /* Check Test Chip type and set the specific setup routine */
- if (tc_type == TC_G210_20BIT) {
- tc_dwc_g210_pci_hba_vops.phy_initialization =
- tc_dwc_g210_config_20_bit;
- } else if (tc_type == TC_G210_40BIT) {
- tc_dwc_g210_pci_hba_vops.phy_initialization =
- tc_dwc_g210_config_40_bit;
- } else {
- dev_err(&pdev->dev, "test chip version not specified\n");
- return -EPERM;
- }
-
- err = pcim_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pcim_enable_device failed\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
- if (err < 0) {
- dev_err(&pdev->dev, "request and iomap failed\n");
- return err;
- }
-
- mmio_base = pcim_iomap_table(pdev)[0];
-
- err = ufshcd_alloc_host(&pdev->dev, &hba);
- if (err) {
- dev_err(&pdev->dev, "Allocation failed\n");
- return err;
- }
-
- hba->vops = &tc_dwc_g210_pci_hba_vops;
-
- err = ufshcd_init(hba, mmio_base, pdev->irq);
- if (err) {
- dev_err(&pdev->dev, "Initialization failed\n");
- return err;
- }
-
- pci_set_drvdata(pdev, hba);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
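
A note on why tc_dwc_g210_pci_remove() above is deliberately thin:

/*
 * The pcim_*() helpers used in probe are device-managed: BAR unmapping
 * and pci_disable_device() happen automatically on unbind, so remove()
 * only has to revert the runtime-PM state and call ufshcd_remove().
 */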
-
-static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
- .prepare = ufshcd_suspend_prepare,
- .complete = ufshcd_resume_complete,
-};
-
-static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
- { PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl);
-
-static struct pci_driver tc_dwc_g210_pci_driver = {
- .name = "tc-dwc-g210-pci",
- .id_table = tc_dwc_g210_pci_tbl,
- .probe = tc_dwc_g210_pci_probe,
- .remove = tc_dwc_g210_pci_remove,
- .shutdown = tc_dwc_g210_pci_shutdown,
- .driver = {
- .pm = &tc_dwc_g210_pci_pm_ops
- },
-};
-
-module_pci_driver(tc_dwc_g210_pci_driver);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
deleted file mode 100644
index 783ec43efa78..000000000000
--- a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Synopsys G210 Test Chip driver
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/delay.h>
-
-#include "ufshcd-pltfrm.h"
-#include "ufshcd-dwc.h"
-#include "tc-dwc-g210.h"
-
-/*
- * UFS DWC specific variant operations
- */
-static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = {
- .name = "tc-dwc-g210-pltfm",
- .link_startup_notify = ufshcd_dwc_link_startup_notify,
- .phy_initialization = tc_dwc_g210_config_20_bit,
-};
-
-static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = {
- .name = "tc-dwc-g210-pltfm",
- .link_startup_notify = ufshcd_dwc_link_startup_notify,
- .phy_initialization = tc_dwc_g210_config_40_bit,
-};
-
-static const struct of_device_id tc_dwc_g210_pltfm_match[] = {
- {
- .compatible = "snps,g210-tc-6.00-20bit",
- .data = &tc_dwc_g210_20bit_pltfm_hba_vops,
- },
- {
- .compatible = "snps,g210-tc-6.00-40bit",
- .data = &tc_dwc_g210_40bit_pltfm_hba_vops,
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match);
-
-/**
- * tc_dwc_g210_pltfm_probe()
- * @pdev: pointer to platform device structure
- *
- */
-static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
-{
- int err;
- const struct of_device_id *of_id;
- struct ufs_hba_variant_ops *vops;
- struct device *dev = &pdev->dev;
-
- of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node);
- vops = (struct ufs_hba_variant_ops *)of_id->data;
-
- /* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, vops);
- if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
-
- return err;
-}
-
-/**
- * tc_dwc_g210_pltfm_remove()
- * @pdev: pointer to platform device structure
- *
- */
-static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&pdev->dev);
- ufshcd_remove(hba);
-
- return 0;
-}
-
-static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
-};
-
-static struct platform_driver tc_dwc_g210_pltfm_driver = {
- .probe = tc_dwc_g210_pltfm_probe,
- .remove = tc_dwc_g210_pltfm_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "tc-dwc-g210-pltfm",
- .pm = &tc_dwc_g210_pltfm_pm_ops,
- .of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match),
- },
-};
-
-module_platform_driver(tc_dwc_g210_pltfm_driver);
-
-MODULE_ALIAS("platform:tc-dwc-g210-pltfm");
-MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver");
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
deleted file mode 100644
index f954a68f6b4c..000000000000
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ /dev/null
@@ -1,317 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Synopsys G210 Test Chip driver
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#include "ufshcd.h"
-#include "unipro.h"
-
-#include "ufshcd-dwc.h"
-#include "ufshci-dwc.h"
-#include "tc-dwc-g210.h"
-
-/**
- * tc_dwc_g210_setup_40bit_rmmi()
- * This function configures Synopsys TC specific attributes (40-bit RMMI)
- * @hba: Pointer to driver's structure
- *
- * Returns 0 on success or non-zero value on failure
- */
-static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
-{
- static const struct ufshcd_dme_attr_val setup_attrs[] = {
- { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
- { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
- { UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
- { UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL },
- { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
- { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
- { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
- { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
- DME_LOCAL },
- { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
- { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
- DME_LOCAL },
- { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
- };
-
- return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
- ARRAY_SIZE(setup_attrs));
-}
-
-/**
- * tc_dwc_g210_setup_20bit_rmmi_lane0()
- * This function configures Synopsys TC 20-bit RMMI Lane 0
- * @hba: Pointer to driver's structure
- *
- * Returns 0 on success or non-zero value on failure
- */
-static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
-{
- static const struct ufshcd_dme_attr_val setup_attrs[] = {
- { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
- DME_LOCAL },
- { UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
- { UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
- DME_LOCAL },
- { UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
- };
-
- return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
- ARRAY_SIZE(setup_attrs));
-}
-
-/**
- * tc_dwc_g210_setup_20bit_rmmi_lane1()
- * This function configures Synopsys TC 20-bit RMMI Lane 1
- * @hba: Pointer to driver's structure
- *
- * Returns 0 on success or non-zero value on failure
- */
-static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
-{
- int connected_rx_lanes = 0;
- int connected_tx_lanes = 0;
- int ret = 0;
-
- static const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
- { UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
- DME_LOCAL },
- };
-
- static const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
- { UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E,
- DME_LOCAL },
- { UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f,
- DME_LOCAL },
- };
-
- /* Get the available lane count */
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
- &connected_rx_lanes);
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
- &connected_tx_lanes);
-
- if (connected_tx_lanes == 2) {
-
- ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs,
- ARRAY_SIZE(setup_tx_attrs));
-
- if (ret)
- goto out;
- }
-
- if (connected_rx_lanes == 2) {
- ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs,
- ARRAY_SIZE(setup_rx_attrs));
- }
-
-out:
- return ret;
-}
-
-/**
- * tc_dwc_g210_setup_20bit_rmmi()
- * This function configures Synopsys TC specific attributes (20-bit RMMI)
- * @hba: Pointer to driver's structure
- *
- * Returns 0 on success or non-zero value on failure
- */
-static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
-{
- int ret = 0;
-
- static const struct ufshcd_dme_attr_val setup_attrs[] = {
- { UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
- { UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
- { UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
- { UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL },
- { UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
- { UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
- { UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
- };
-
- ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
- ARRAY_SIZE(setup_attrs));
- if (ret)
- goto out;
-
- /* Lane 0 configuration */
- ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba);
- if (ret)
- goto out;
-
- /* Lane 1 configuration */
- ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba);
- if (ret)
- goto out;
-
-out:
- return ret;
-}
-
-/**
- * tc_dwc_g210_config_40_bit()
- * This function configures Local (host) Synopsys 40-bit TC specific attributes
- *
- * @hba: Pointer to driver's structure
- *
- * Returns 0 on success, non-zero value on failure
- */
-int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
-{
- int ret = 0;
-
- dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n");
- ret = tc_dwc_g210_setup_40bit_rmmi(hba);
- if (ret) {
- dev_err(hba->dev, "Configuration failed\n");
- goto out;
- }
-
- /* To write Shadow register bank to effective configuration block */
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
- if (ret)
- goto out;
-
- /* To configure Debug OMC */
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
-
-/**
- * tc_dwc_g210_config_20_bit()
- * This function configures Local (host) Synopsys 20-bit TC specific attributes
- *
- * @hba: Pointer to driver's structure
- *
- * Returns 0 on success, non-zero value on failure
- */
-int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
-{
- int ret = 0;
-
- dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n");
- ret = tc_dwc_g210_setup_20bit_rmmi(hba);
- if (ret) {
- dev_err(hba->dev, "Configuration failed\n");
- goto out;
- }
-
- /* To write Shadow register bank to effective configuration block */
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
- if (ret)
- goto out;
-
- /* To configure Debug OMC */
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(tc_dwc_g210_config_20_bit);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("Synopsys G210 Test Chip driver");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/tc-dwc-g210.h b/drivers/scsi/ufs/tc-dwc-g210.h
deleted file mode 100644
index 5a506da03f4a..000000000000
--- a/drivers/scsi/ufs/tc-dwc-g210.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Synopsys G210 Test Chip driver
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#ifndef _TC_DWC_G210_H
-#define _TC_DWC_G210_H
-
-int tc_dwc_g210_config_40_bit(struct ufs_hba *hba);
-int tc_dwc_g210_config_20_bit(struct ufs_hba *hba);
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c
deleted file mode 100644
index eafe0db98d54..000000000000
--- a/drivers/scsi/ufs/ti-j721e-ufs.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
-//
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#define TI_UFS_SS_CTRL 0x4
-#define TI_UFS_SS_RST_N_PCS BIT(0)
-#define TI_UFS_SS_CLK_26MHZ BIT(4)
-
-static int ti_j721e_ufs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- unsigned long clk_rate;
- void __iomem *regbase;
- struct clk *clk;
- u32 reg = 0;
- int ret;
-
- regbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(regbase))
- return PTR_ERR(regbase);
-
- pm_runtime_enable(dev);
- ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
- goto disable_pm;
- }
-
- /* Select MPHY refclk frequency */
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "Cannot claim MPHY clock.\n");
- goto clk_err;
- }
- clk_rate = clk_get_rate(clk);
- if (clk_rate == 26000000)
- reg |= TI_UFS_SS_CLK_26MHZ;
- devm_clk_put(dev, clk);
-
- /* Take UFS slave device out of reset */
- reg |= TI_UFS_SS_RST_N_PCS;
- writel(reg, regbase + TI_UFS_SS_CTRL);
-
- ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
- dev);
- if (ret) {
- dev_err(dev, "failed to populate child nodes %d\n", ret);
- goto clk_err;
- }
-
- return ret;
-
-clk_err:
- pm_runtime_put_sync(dev);
-disable_pm:
- pm_runtime_disable(dev);
- return ret;
-}
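
Summarizing what this glue layer does, since the register writes above are terse:

/*
 * The wrapper latches exactly two facts into TI_UFS_SS_CTRL before
 * populating the generic Cadence child node: whether the MPHY
 * reference clock runs at 26 MHz, and the release of the PCS reset.
 * Everything else is left to the Cadence UFS core driver.
 */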
-
-static int ti_j721e_ufs_remove(struct platform_device *pdev)
-{
- of_platform_depopulate(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
-static const struct of_device_id ti_j721e_ufs_of_match[] = {
- {
- .compatible = "ti,j721e-ufs",
- },
- { },
-};
-
-static struct platform_driver ti_j721e_ufs_driver = {
- .probe = ti_j721e_ufs_probe,
- .remove = ti_j721e_ufs_remove,
- .driver = {
- .name = "ti-j721e-ufs",
- .of_match_table = ti_j721e_ufs_of_match,
- },
-};
-module_platform_driver(ti_j721e_ufs_driver);
-
-MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
-MODULE_DESCRIPTION("TI UFS host controller glue driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
deleted file mode 100644
index 4a0bbcf1757a..000000000000
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2020 Intel Corporation
-
-#include <linux/debugfs.h>
-
-#include "ufs-debugfs.h"
-#include "ufshcd.h"
-
-static struct dentry *ufs_debugfs_root;
-
-struct ufs_debugfs_attr {
- const char *name;
- mode_t mode;
- const struct file_operations *fops;
-};
-
-/* @file corresponds to a debugfs attribute in directory hba->debugfs_root. */
-static inline struct ufs_hba *hba_from_file(const struct file *file)
-{
- return d_inode(file->f_path.dentry->d_parent)->i_private;
-}
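
The d_parent walk above is a small trick worth spelling out:

/*
 * Each attribute file created in ufs_debugfs_hba_init() keeps its
 * struct ufs_debugfs_attr in its own inode (i_private), so the hba
 * pointer is stashed in the parent directory's inode instead; hence
 * this helper dereferences d_parent rather than file private data.
 */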
-
-void __init ufs_debugfs_init(void)
-{
- ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
-}
-
-void ufs_debugfs_exit(void)
-{
- debugfs_remove_recursive(ufs_debugfs_root);
-}
-
-static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
-{
- struct ufs_hba *hba = hba_from_file(s->file);
- struct ufs_event_hist *e = hba->ufs_stats.event;
-
-#define PRT(fmt, typ) \
- seq_printf(s, fmt, e[UFS_EVT_ ## typ].cnt)
-
- PRT("PHY Adapter Layer errors (except LINERESET): %llu\n", PA_ERR);
- PRT("Data Link Layer errors: %llu\n", DL_ERR);
- PRT("Network Layer errors: %llu\n", NL_ERR);
- PRT("Transport Layer errors: %llu\n", TL_ERR);
- PRT("Generic DME errors: %llu\n", DME_ERR);
- PRT("Auto-hibernate errors: %llu\n", AUTO_HIBERN8_ERR);
- PRT("IS Fatal errors (CEFES, SBFES, HCFES, DFES): %llu\n", FATAL_ERR);
- PRT("DME Link Startup errors: %llu\n", LINK_STARTUP_FAIL);
- PRT("PM Resume errors: %llu\n", RESUME_ERR);
- PRT("PM Suspend errors : %llu\n", SUSPEND_ERR);
- PRT("Logical Unit Resets: %llu\n", DEV_RESET);
- PRT("Host Resets: %llu\n", HOST_RESET);
- PRT("SCSI command aborts: %llu\n", ABORT);
-#undef PRT
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats);
-
-static int ee_usr_mask_get(void *data, u64 *val)
-{
- struct ufs_hba *hba = data;
-
- *val = hba->ee_usr_mask;
- return 0;
-}
-
-static int ufs_debugfs_get_user_access(struct ufs_hba *hba)
-__acquires(&hba->host_sem)
-{
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- up(&hba->host_sem);
- return -EBUSY;
- }
- ufshcd_rpm_get_sync(hba);
- return 0;
-}
-
-static void ufs_debugfs_put_user_access(struct ufs_hba *hba)
-__releases(&hba->host_sem)
-{
- ufshcd_rpm_put_sync(hba);
- up(&hba->host_sem);
-}
-
-static int ee_usr_mask_set(void *data, u64 val)
-{
- struct ufs_hba *hba = data;
- int err;
-
- if (val & ~(u64)MASK_EE_STATUS)
- return -EINVAL;
- err = ufs_debugfs_get_user_access(hba);
- if (err)
- return err;
- err = ufshcd_update_ee_usr_mask(hba, val, MASK_EE_STATUS);
- ufs_debugfs_put_user_access(hba);
- return err;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(ee_usr_mask_fops, ee_usr_mask_get, ee_usr_mask_set, "%#llx\n");
-
-void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status)
-{
- bool chgd = false;
- u16 ee_ctrl_mask;
- int err = 0;
-
- if (!hba->debugfs_ee_rate_limit_ms || !status)
- return;
-
- mutex_lock(&hba->ee_ctrl_mutex);
- ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status);
- chgd = ee_ctrl_mask != hba->ee_ctrl_mask;
- if (chgd) {
- err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
- if (err)
- dev_err(hba->dev, "%s: failed to write ee control %d\n",
- __func__, err);
- }
- mutex_unlock(&hba->ee_ctrl_mutex);
-
- if (chgd && !err) {
- unsigned long delay = msecs_to_jiffies(hba->debugfs_ee_rate_limit_ms);
-
- queue_delayed_work(system_freezable_wq, &hba->debugfs_ee_work, delay);
- }
-}
-
-static void ufs_debugfs_restart_ee(struct work_struct *work)
-{
- struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work);
-
- if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) ||
- ufs_debugfs_get_user_access(hba))
- return;
- ufshcd_write_ee_control(hba);
- ufs_debugfs_put_user_access(hba);
-}
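
How the two routines above fit together:

/*
 * ufs_debugfs_exception_event() and ufs_debugfs_restart_ee() jointly
 * rate-limit user-enabled exception events: bits of ee_usr_mask that
 * just fired are masked out of the control mask at once, and the
 * delayed work re-arms them debugfs_ee_rate_limit_ms milliseconds
 * later via ufshcd_write_ee_control().
 */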
-
-static int ufs_saved_err_show(struct seq_file *s, void *data)
-{
- struct ufs_debugfs_attr *attr = s->private;
- struct ufs_hba *hba = hba_from_file(s->file);
- const int *p;
-
- if (strcmp(attr->name, "saved_err") == 0) {
- p = &hba->saved_err;
- } else if (strcmp(attr->name, "saved_uic_err") == 0) {
- p = &hba->saved_uic_err;
- } else {
- return -ENOENT;
- }
-
- seq_printf(s, "%d\n", *p);
- return 0;
-}
-
-static ssize_t ufs_saved_err_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ufs_debugfs_attr *attr = file->f_inode->i_private;
- struct ufs_hba *hba = hba_from_file(file);
- char val_str[16] = { };
- int val, ret;
-
- if (count >= sizeof(val_str))	/* need room for the NUL terminator */
- return -EINVAL;
- if (copy_from_user(val_str, buf, count))
- return -EFAULT;
- ret = kstrtoint(val_str, 0, &val);
- if (ret < 0)
- return ret;
-
- spin_lock_irq(hba->host->host_lock);
- if (strcmp(attr->name, "saved_err") == 0) {
- hba->saved_err = val;
- } else if (strcmp(attr->name, "saved_uic_err") == 0) {
- hba->saved_uic_err = val;
- } else {
- ret = -ENOENT;
- }
- if (ret == 0)
- ufshcd_schedule_eh_work(hba);
- spin_unlock_irq(hba->host->host_lock);
-
- return ret < 0 ? ret : count;
-}
-
-static int ufs_saved_err_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ufs_saved_err_show, inode->i_private);
-}
-
-static const struct file_operations ufs_saved_err_fops = {
- .owner = THIS_MODULE,
- .open = ufs_saved_err_open,
- .read = seq_read,
- .write = ufs_saved_err_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct ufs_debugfs_attr ufs_attrs[] = {
- { "stats", 0400, &ufs_debugfs_stats_fops },
- { "saved_err", 0600, &ufs_saved_err_fops },
- { "saved_uic_err", 0600, &ufs_saved_err_fops },
- { }
-};
-
-void ufs_debugfs_hba_init(struct ufs_hba *hba)
-{
- const struct ufs_debugfs_attr *attr;
- struct dentry *root;
-
- /* Set default exception event rate limit period to 20ms */
- hba->debugfs_ee_rate_limit_ms = 20;
- INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
-
- root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
- if (IS_ERR_OR_NULL(root))
- return;
- hba->debugfs_root = root;
- d_inode(root)->i_private = hba;
- for (attr = ufs_attrs; attr->name; attr++)
- debugfs_create_file(attr->name, attr->mode, root, (void *)attr,
- attr->fops);
- debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
- hba, &ee_usr_mask_fops);
- debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
- &hba->debugfs_ee_rate_limit_ms);
-}
-
-void ufs_debugfs_hba_exit(struct ufs_hba *hba)
-{
- debugfs_remove_recursive(hba->debugfs_root);
- cancel_delayed_work_sync(&hba->debugfs_ee_work);
-}
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
deleted file mode 100644
index 97548a3f90eb..000000000000
--- a/drivers/scsi/ufs/ufs-debugfs.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2020 Intel Corporation
- */
-
-#ifndef __UFS_DEBUGFS_H__
-#define __UFS_DEBUGFS_H__
-
-struct ufs_hba;
-
-#ifdef CONFIG_DEBUG_FS
-void __init ufs_debugfs_init(void);
-void ufs_debugfs_exit(void);
-void ufs_debugfs_hba_init(struct ufs_hba *hba);
-void ufs_debugfs_hba_exit(struct ufs_hba *hba);
-void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status);
-#else
-static inline void ufs_debugfs_init(void) {}
-static inline void ufs_debugfs_exit(void) {}
-static inline void ufs_debugfs_hba_init(struct ufs_hba *hba) {}
-static inline void ufs_debugfs_hba_exit(struct ufs_hba *hba) {}
-static inline void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status) {}
-#endif
-
-#endif
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
deleted file mode 100644
index cd26bc82462e..000000000000
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ /dev/null
@@ -1,1629 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UFS Host Controller driver for Exynos specific extensions
- *
- * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
- * Author: Seungwon Jeon <essuuj@gmail.com>
- * Author: Alim Akhtar <alim.akhtar@samsung.com>
- *
- */
-
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/mfd/syscon.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#include "ufshcd.h"
-#include "ufshcd-pltfrm.h"
-#include "ufshci.h"
-#include "unipro.h"
-
-#include "ufs-exynos.h"
-
-/*
- * Exynos's Vendor specific registers for UFSHCI
- */
-#define HCI_TXPRDT_ENTRY_SIZE 0x00
-#define PRDT_PREFECT_EN BIT(31)
-#define PRDT_SET_SIZE(x) ((x) & 0x1F)
-#define HCI_RXPRDT_ENTRY_SIZE 0x04
-#define HCI_1US_TO_CNT_VAL 0x0C
-#define CNT_VAL_1US_MASK 0x3FF
-#define HCI_UTRL_NEXUS_TYPE 0x40
-#define HCI_UTMRL_NEXUS_TYPE 0x44
-#define HCI_SW_RST 0x50
-#define UFS_LINK_SW_RST BIT(0)
-#define UFS_UNIPRO_SW_RST BIT(1)
-#define UFS_SW_RST_MASK (UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST)
-#define HCI_DATA_REORDER 0x60
-#define HCI_UNIPRO_APB_CLK_CTRL 0x68
-#define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF))
-#define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C
-#define HCI_GPIO_OUT 0x70
-#define HCI_ERR_EN_PA_LAYER 0x78
-#define HCI_ERR_EN_DL_LAYER 0x7C
-#define HCI_ERR_EN_N_LAYER 0x80
-#define HCI_ERR_EN_T_LAYER 0x84
-#define HCI_ERR_EN_DME_LAYER 0x88
-#define HCI_CLKSTOP_CTRL 0xB0
-#define REFCLKOUT_STOP BIT(4)
-#define REFCLK_STOP BIT(2)
-#define UNIPRO_MCLK_STOP BIT(1)
-#define UNIPRO_PCLK_STOP BIT(0)
-#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
- UNIPRO_MCLK_STOP |\
- UNIPRO_PCLK_STOP)
-#define HCI_MISC 0xB4
-#define REFCLK_CTRL_EN BIT(7)
-#define UNIPRO_PCLK_CTRL_EN BIT(6)
-#define UNIPRO_MCLK_CTRL_EN BIT(5)
-#define HCI_CORECLK_CTRL_EN BIT(4)
-#define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\
- UNIPRO_PCLK_CTRL_EN |\
- UNIPRO_MCLK_CTRL_EN)
-/* Device fatal error */
-#define DFES_ERR_EN BIT(31)
-#define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
- UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
-#define DFES_DEF_L3_ERRS (UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\
- UIC_NETWORK_BAD_DEVICEID_ENC |\
- UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING)
-#define DFES_DEF_L4_ERRS (UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\
- UIC_TRANSPORT_UNKNOWN_CPORTID |\
- UIC_TRANSPORT_NO_CONNECTION_RX |\
- UIC_TRANSPORT_BAD_TC)
-
-/* FSYS UFS Shareability */
-#define UFS_WR_SHARABLE BIT(2)
-#define UFS_RD_SHARABLE BIT(1)
-#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
-#define UFS_SHAREABILITY_OFFSET 0x710
-
-/* Multi-host registers */
-#define MHCTRL 0xC4
-#define MHCTRL_EN_VH_MASK (0xE)
-#define MHCTRL_EN_VH(vh) ((vh) << 1)
-#define PH2VH_MBOX 0xD8
-
-#define MH_MSG_MASK (0xFF)
-
-#define MH_MSG(id, msg) (((id) << 8) | ((msg) & 0xFF))
-#define MH_MSG_PH_READY 0x1
-#define MH_MSG_VH_READY 0x2
-
-#define ALLOW_INQUIRY BIT(25)
-#define ALLOW_MODE_SELECT BIT(24)
-#define ALLOW_MODE_SENSE BIT(23)
-#define ALLOW_PRE_FETCH GENMASK(22, 21)
-#define ALLOW_READ_CMD_ALL GENMASK(20, 18) /* read_6/10/16 */
-#define ALLOW_READ_BUFFER BIT(17)
-#define ALLOW_READ_CAPACITY GENMASK(16, 15)
-#define ALLOW_REPORT_LUNS BIT(14)
-#define ALLOW_REQUEST_SENSE BIT(13)
-#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7)
-#define ALLOW_TEST_UNIT_READY BIT(6)
-#define ALLOW_UNMAP BIT(5)
-#define ALLOW_VERIFY BIT(4)
-#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1) /* write_6/10/16 */
-
-#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \
- ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \
- ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \
- ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \
- ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \
- ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \
- ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL)
-
-#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C
-#define HCI_MH_IID_IN_TASK_TAG 0x308
-
-#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC)
-
-enum {
- UNIPRO_L1_5 = 0, /* PHY Adapter */
- UNIPRO_L2, /* Data Link */
- UNIPRO_L3, /* Network */
- UNIPRO_L4, /* Transport */
- UNIPRO_DME, /* DME */
-};
-
-/*
- * UNIPRO registers
- */
-#define UNIPRO_COMP_VERSION 0x000
-#define UNIPRO_DME_PWR_REQ 0x090
-#define UNIPRO_DME_PWR_REQ_POWERMODE 0x094
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0 0x098
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1 0x09C
-#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2 0x0A0
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0 0x0A4
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1 0x0A8
-#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2 0x0AC
-
-/*
- * UFS Protector registers
- */
-#define UFSPRSECURITY 0x010
-#define NSSMU BIT(14)
-#define UFSPSBEGIN0 0x200
-#define UFSPSEND0 0x204
-#define UFSPSLUN0 0x208
-#define UFSPSCTRL0 0x20C
-
-#define CNTR_DIV_VAL 40
-
-static struct exynos_ufs_drv_data exynos_ufs_drvs;
-static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
-static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
-
-static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs)
-{
- exynos_ufs_auto_ctrl_hcc(ufs, true);
-}
-
-static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs)
-{
- exynos_ufs_auto_ctrl_hcc(ufs, false);
-}
-
-static inline void exynos_ufs_disable_auto_ctrl_hcc_save(
- struct exynos_ufs *ufs, u32 *val)
-{
- *val = hci_readl(ufs, HCI_MISC);
- exynos_ufs_auto_ctrl_hcc(ufs, false);
-}
-
-static inline void exynos_ufs_auto_ctrl_hcc_restore(
- struct exynos_ufs *ufs, u32 *val)
-{
- hci_writel(ufs, *val, HCI_MISC);
-}
-
-static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs)
-{
- exynos_ufs_ctrl_clkstop(ufs, true);
-}
-
-static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
-{
- exynos_ufs_ctrl_clkstop(ufs, false);
-}
-
-static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
-{
- return 0;
-}
-
-static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
-{
- struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
-
- /* IO Coherency setting */
- if (ufs->sysreg) {
- return regmap_update_bits(ufs->sysreg,
- ufs->shareability_reg_offset,
- UFS_SHARABLE, UFS_SHARABLE);
- }
-
- attr->tx_dif_p_nsec = 3200000;
-
- return 0;
-}
-
-static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
-
- /* Enable Virtual Host #1 */
- ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL);
- /* Default VH Transfer permissions */
- hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH);
- /* The IID is carried in TASKTAG[7:5] instead of the IID field in the UCD */
- hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG);
-
- return 0;
-}
-
-static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- int i;
- u32 tx_line_reset_period, rx_line_reset_period;
-
- rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
- tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
- for_each_ufs_rx_lane(ufs, i) {
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
- DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
- (rx_line_reset_period >> 16) & 0xFF);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
- (rx_line_reset_period >> 8) & 0xFF);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
- (rx_line_reset_period) & 0xFF);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
- }
-
- for_each_ufs_tx_lane(ufs, i) {
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
- DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
- /* Keep VND_TX_LINERESET_PVALUE from affecting VND_TX_CLK_PRD */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
- 0x02);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
- (tx_line_reset_period >> 16) & 0xFF);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
- (tx_line_reset_period >> 8) & 0xFF);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
- (tx_line_reset_period) & 0xFF);
-
- /* TX PWM Gear Capability / PWM_G1_ONLY */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1);
- }
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000);
-
- return 0;
-}
-
-static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
-{
- struct ufs_hba *hba = ufs->hba;
-
- /* These power mode parameters are delivered to the remote DME via PACP_PWR_req */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
-
- return 0;
-}
-
-static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
-{
- struct ufs_hba *hba = ufs->hba;
- u32 enabled_vh;
-
- enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK;
-
- /* Send physical host ready message to virtual hosts */
- ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX);
-
- return 0;
-}
-
-static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
- int i;
-
- exynos_ufs_enable_ov_tm(hba);
- for_each_ufs_tx_lane(ufs, i)
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17);
- for_each_ufs_rx_lane(ufs, i) {
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00);
- }
- exynos_ufs_disable_ov_tm(hba);
-
- for_each_ufs_tx_lane(ufs, i)
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
- udelay(1);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12));
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
- udelay(1600);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);
-
- return 0;
-}
-
-static int exynos7_ufs_post_link(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- int i;
-
- exynos_ufs_enable_ov_tm(hba);
- for_each_ufs_tx_lane(ufs, i) {
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i),
- TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000)));
- }
- exynos_ufs_disable_ov_tm(hba);
-
- exynos_ufs_enable_dbg_mode(hba);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8);
- exynos_ufs_disable_dbg_mode(hba);
-
- return 0;
-}
-
-static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
-{
- unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE);
-
- return 0;
-}
-
-static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
-{
- struct ufs_hba *hba = ufs->hba;
- int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1);
-
- if (lanes == 1) {
- exynos_ufs_enable_dbg_mode(hba);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1);
- exynos_ufs_disable_dbg_mode(hba);
- }
-
- return 0;
-}
-
-/*
- * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
- * Control should be disabled in the below cases
- * - Before host controller S/W reset
- * - Access to UFS protector's register
- */
-static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
-{
- u32 misc = hci_readl(ufs, HCI_MISC);
-
- if (en)
- hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC);
- else
- hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC);
-}
-
-static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en)
-{
- u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL);
- u32 misc = hci_readl(ufs, HCI_MISC);
-
- if (en) {
- hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC);
- hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
- } else {
- hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
- hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC);
- }
-}
-
-static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- struct list_head *head = &hba->clk_list_head;
- struct ufs_clk_info *clki;
- unsigned long pclk_rate;
- u32 f_min, f_max;
- u8 div = 0;
- int ret = 0;
-
- if (list_empty(head))
- goto out;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR(clki->clk)) {
- if (!strcmp(clki->name, "core_clk"))
- ufs->clk_hci_core = clki->clk;
- else if (!strcmp(clki->name, "sclk_unipro_main"))
- ufs->clk_unipro_main = clki->clk;
- }
- }
-
- if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
- dev_err(hba->dev, "failed to get clk info\n");
- ret = -EINVAL;
- goto out;
- }
-
- ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
- pclk_rate = clk_get_rate(ufs->clk_hci_core);
- f_min = ufs->pclk_avail_min;
- f_max = ufs->pclk_avail_max;
-
- if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
- do {
- pclk_rate /= (div + 1);
-
- if (pclk_rate <= f_max)
- break;
- div++;
- } while (pclk_rate >= f_min);
- }
-
- if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
- dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
- ret = -EINVAL;
- goto out;
- }
-
- ufs->pclk_rate = pclk_rate;
- ufs->pclk_div = div;
-
-out:
- return ret;
-}
-
-static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs)
-{
- if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
- u32 val;
-
- val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL);
- hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
- HCI_UNIPRO_APB_CLK_CTRL);
- }
-}
-
-static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
-
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
-}
-
-static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
- const unsigned int div = 30, mult = 20;
- const unsigned long pwm_min = 3 * 1000 * 1000;
- const unsigned long pwm_max = 9 * 1000 * 1000;
- const int divs[] = {32, 16, 8, 4};
- unsigned long clk = 0, _clk, clk_period;
- int i = 0, clk_idx = -1;
-
- clk_period = UNIPRO_PCLK_PERIOD(ufs);
- for (i = 0; i < ARRAY_SIZE(divs); i++) {
- _clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div);
- if (_clk >= pwm_min && _clk <= pwm_max) {
- if (_clk > clk) {
- clk_idx = i;
- clk = _clk;
- }
- }
- }
-
- if (clk_idx == -1) {
- ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx);
- dev_err(hba->dev,
- "failed to decide pwm clock divider, will not change\n");
- }
-
- attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
-}
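With div = 30 and mult = 20, and clk_period ~= NSEC_PER_SEC / pclk_rate, the candidate rate reduces to roughly pclk_rate * 20 / (30 * divs[i]). A worked selection for an assumed 133 MHz pclk (clk_period = 7 ns after the integer division):

/*
 * _clk = 1e9 * 20 / (7 * divs[i] * 30):
 *   divs[0] = 32 ->  2.98 MHz  (below the 3 MHz minimum, rejected)
 *   divs[1] = 16 ->  5.95 MHz  (inside 3..9 MHz, selected)
 *   divs[2] =  8 -> 11.90 MHz  (above the 9 MHz maximum, rejected)
 *   divs[3] =  4 -> 23.81 MHz  (rejected)
 *
 * so clk_idx = 1 and CMN_PWM_CLK_CTRL is programmed with 1.
 */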
-
-long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period)
-{
- const int precise = 10;
- long pclk_rate = ufs->pclk_rate;
- long clk_period, fraction;
-
- clk_period = UNIPRO_PCLK_PERIOD(ufs);
- fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;
-
- return (period * precise) / ((clk_period * precise) + fraction);
-}
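This converts a nanosecond period into a count of pclk cycles, carrying one extra decimal digit (precise = 10) so a fractional clock period is not simply truncated. A worked example for an assumed 133 MHz pclk:

/*
 * clk_period = 1e9 / 133000000                      = 7  (integer ns)
 * fraction   = ((1e9 % 133000000) * 10) / 133000000 = 5
 *              (i.e. the real period is ~7.5 ns)
 *
 * calc_time_cntr(200000) = 200000 * 10 / (7 * 10 + 5) = 26666 cycles,
 * the count fed to TX_LINERESET_N() in exynos7_ufs_post_link() above
 * for its 200 us line-reset period.
 */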
-
-static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
-{
- struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
- struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
-
- t_cfg->tx_linereset_p =
- exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
- t_cfg->tx_linereset_n =
- exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
- t_cfg->tx_high_z_cnt =
- exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
- t_cfg->tx_base_n_val =
- exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
- t_cfg->tx_gran_n_val =
- exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
- t_cfg->tx_sleep_cnt =
- exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);
-
- t_cfg->rx_linereset =
- exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
- t_cfg->rx_hibern8_wait =
- exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
- t_cfg->rx_base_n_val =
- exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
- t_cfg->rx_gran_n_val =
- exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
- t_cfg->rx_sleep_cnt =
- exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
- t_cfg->rx_stall_cnt =
- exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
-}
-
-static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
- int i;
-
- exynos_ufs_set_pwm_clk_div(ufs);
-
- exynos_ufs_enable_ov_tm(hba);
-
- for_each_ufs_rx_lane(ufs, i) {
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i),
- ufs->drv_data->uic_attr->rx_filler_enable);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i),
- RX_LINERESET(t_cfg->rx_linereset));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i),
- RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i),
- RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i),
- RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i),
- RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i),
- RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i),
- RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
- }
-
- for_each_ufs_tx_lane(ufs, i) {
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i),
- TX_LINERESET_P(t_cfg->tx_linereset_p));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i),
- TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i),
- TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i),
- TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i),
- TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i),
- TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i),
- TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i),
- TX_OV_H8_ENTER_EN |
- TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i),
- ufs->drv_data->uic_attr->tx_min_activatetime);
- }
-
- exynos_ufs_disable_ov_tm(hba);
-}
-
-static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
- int i;
-
- exynos_ufs_enable_ov_tm(hba);
-
- for_each_ufs_rx_lane(ufs, i) {
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i),
- attr->rx_hs_g1_sync_len_cap);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i),
- attr->rx_hs_g2_sync_len_cap);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i),
- attr->rx_hs_g3_sync_len_cap);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i),
- attr->rx_hs_g1_prep_sync_len_cap);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i),
- attr->rx_hs_g2_prep_sync_len_cap);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i),
- attr->rx_hs_g3_prep_sync_len_cap);
- }
-
- if (attr->rx_adv_fine_gran_sup_en == 0) {
- for_each_ufs_rx_lane(ufs, i) {
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0);
-
- if (attr->rx_min_actv_time_cap)
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP,
- i), attr->rx_min_actv_time_cap);
-
- if (attr->rx_hibern8_time_cap)
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i),
- attr->rx_hibern8_time_cap);
- }
- } else if (attr->rx_adv_fine_gran_sup_en == 1) {
- for_each_ufs_rx_lane(ufs, i) {
- if (attr->rx_adv_fine_gran_step)
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP,
- i), RX_ADV_FINE_GRAN_STEP(
- attr->rx_adv_fine_gran_step));
-
- if (attr->rx_adv_min_actv_time_cap)
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(
- RX_ADV_MIN_ACTIVATETIME_CAP, i),
- attr->rx_adv_min_actv_time_cap);
-
- if (attr->rx_adv_hibern8_time_cap)
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP,
- i),
- attr->rx_adv_hibern8_time_cap);
- }
- }
-
- exynos_ufs_disable_ov_tm(hba);
-}
-
-static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- enum {
- DEV_ID = 0x00,
- PEER_DEV_ID = 0x01,
- PEER_CPORT_ID = 0x00,
- TRAFFIC_CLASS = 0x00,
- };
-
- /* allow cport attributes to be set */
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE);
-
- /* local unipro attributes */
- ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
- ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), TRUE);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS);
- ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
-}
-
-static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
-{
- u32 reg, val;
-
- exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
-
- /* make encryption disabled by default */
- reg = ufsp_readl(ufs, UFSPRSECURITY);
- ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY);
- ufsp_writel(ufs, 0x0, UFSPSBEGIN0);
- ufsp_writel(ufs, 0xffffffff, UFSPSEND0);
- ufsp_writel(ufs, 0xff, UFSPSLUN0);
- ufsp_writel(ufs, 0xf1, UFSPSCTRL0);
-
- exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
-}
-
-static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
-{
- struct ufs_hba *hba = ufs->hba;
- u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
- u32 mask, sync_len;
- enum {
- SYNC_LEN_G1 = 80 * 1000, /* 80us */
- SYNC_LEN_G2 = 40 * 1000, /* 40us */
- SYNC_LEN_G3 = 20 * 1000, /* 20us */
- };
- int i;
-
- if (g == 1)
- sync_len = SYNC_LEN_G1;
- else if (g == 2)
- sync_len = SYNC_LEN_G2;
- else if (g == 3)
- sync_len = SYNC_LEN_G3;
- else
- return;
-
- mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
- mask = (mask >> 8) & 0xff;
-
- exynos_ufs_enable_ov_tm(hba);
-
- for_each_ufs_rx_lane(ufs, i)
- ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);
-
- exynos_ufs_disable_ov_tm(hba);
-}
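Continuing the assumed 133 MHz example, the mask written to RX_SYNC_MASK_LENGTH for a gear-2 link works out as:

/*
 * sync_len = SYNC_LEN_G2 = 40000 ns
 * cntr     = 40000 * 10 / 75    = 5333 pclk cycles
 * mask     = (5333 >> 8) & 0xff = 0x14
 */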
-
-static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- struct phy *generic_phy = ufs->phy;
- struct ufs_dev_params ufs_exynos_cap;
- int ret;
-
- if (!dev_req_params) {
- pr_err("%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- ufshcd_init_pwr_dev_param(&ufs_exynos_cap);
-
- ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
- dev_max_params, dev_req_params);
- if (ret) {
- pr_err("%s: failed to determine capabilities\n", __func__);
- goto out;
- }
-
- if (ufs->drv_data->pre_pwr_change)
- ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
-
- if (ufshcd_is_hs_mode(dev_req_params)) {
- exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);
-
- switch (dev_req_params->hs_rate) {
- case PA_HS_MODE_A:
- case PA_HS_MODE_B:
- phy_calibrate(generic_phy);
- break;
- }
- }
-
- /* set the three timeout values for traffic class #0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
-
- return 0;
-out:
- return ret;
-}
-
-#define PWR_MODE_STR_LEN 64
-static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_req)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- struct phy *generic_phy = ufs->phy;
- int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
- int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
- char pwr_str[PWR_MODE_STR_LEN] = "";
-
- /* let default be PWM Gear 1, Lane 1 */
- if (!gear)
- gear = 1;
-
- if (!lanes)
- lanes = 1;
-
- if (ufs->drv_data->post_pwr_change)
- ufs->drv_data->post_pwr_change(ufs, pwr_req);
-
- if (ufshcd_is_hs_mode(pwr_req)) {
- switch (pwr_req->hs_rate) {
- case PA_HS_MODE_A:
- case PA_HS_MODE_B:
- phy_calibrate(generic_phy);
- break;
- }
-
- snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
- "FAST", pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
- gear, lanes);
- } else {
- snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d",
- "SLOW", gear, lanes);
- }
-
- dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str);
-
- return 0;
-}
-
-static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
- int tag, bool op)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- u32 type;
-
- type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);
-
- if (op)
- hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
- else
- hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
-}
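Each bit of HCI_UTRL_NEXUS_TYPE corresponds to one transfer-request slot; ufshcd derives op from whether the slot carries a SCSI command, so the register ends up as a per-slot bitmap of nexus-type requests. For example:

/*
 * tag = 5, op = true  -> type |= BIT(5)  (slot 5 flagged)
 * tag = 5, op = false -> type &= ~BIT(5) (flag cleared when the slot
 *                                         is reused for another request)
 */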
-
-static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba,
- int tag, u8 func)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- u32 type;
-
- type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);
-
- switch (func) {
- case UFS_ABORT_TASK:
- case UFS_QUERY_TASK:
- hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE);
- break;
- case UFS_ABORT_TASK_SET:
- case UFS_CLEAR_TASK_SET:
- case UFS_LOGICAL_RESET:
- case UFS_QUERY_TASK_SET:
- hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE);
- break;
- }
-}
-
-static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
- struct phy *generic_phy = ufs->phy;
- int ret = 0;
-
- if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
- &ufs->avail_ln_rx);
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
- &ufs->avail_ln_tx);
- WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
- "available data lane is not equal(rx:%d, tx:%d)\n",
- ufs->avail_ln_rx, ufs->avail_ln_tx);
- }
-
- phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
- ret = phy_init(generic_phy);
- if (ret) {
- dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
- __func__, ret);
- goto out_exit_phy;
- }
-
- return 0;
-
-out_exit_phy:
- phy_exit(generic_phy);
-
- return ret;
-}
-
-static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
-{
- struct ufs_hba *hba = ufs->hba;
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
- DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
- ufs->drv_data->uic_attr->tx_trailingclks);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
- ufs->drv_data->uic_attr->pa_dbg_option_suite);
-}
-
-static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
-{
- switch (index) {
- case UNIPRO_L1_5:
- hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER);
- break;
- case UNIPRO_L2:
- hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER);
- break;
- case UNIPRO_L3:
- hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER);
- break;
- case UNIPRO_L4:
- hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER);
- break;
- case UNIPRO_DME:
- hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER);
- break;
- }
-}
-
-static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on,
- enum ufs_notify_change_status status)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
- if (!ufs)
- return 0;
-
- if (on && status == PRE_CHANGE) {
- if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
- exynos_ufs_disable_auto_ctrl_hcc(ufs);
- exynos_ufs_ungate_clks(ufs);
- } else if (!on && status == POST_CHANGE) {
- exynos_ufs_gate_clks(ufs);
- if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
- exynos_ufs_enable_auto_ctrl_hcc(ufs);
- }
-
- return 0;
-}
-
-static int exynos_ufs_pre_link(struct ufs_hba *hba)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
- /* hci */
- exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2);
- exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3);
- exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
- exynos_ufs_set_unipro_pclk_div(ufs);
-
- /* unipro */
- exynos_ufs_config_unipro(ufs);
-
- /* m-phy */
- exynos_ufs_phy_init(ufs);
- if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
- exynos_ufs_config_phy_time_attr(ufs);
- exynos_ufs_config_phy_cap_attr(ufs);
- }
-
- exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
- if (ufs->drv_data->pre_link)
- ufs->drv_data->pre_link(ufs);
-
- return 0;
-}
-
-static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
-{
- u32 val;
-
- val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
- hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
-}
-
-static int exynos_ufs_post_link(struct ufs_hba *hba)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- struct phy *generic_phy = ufs->phy;
- struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
-
- exynos_ufs_establish_connt(ufs);
- exynos_ufs_fit_aggr_timeout(ufs);
-
- hci_writel(ufs, 0xa, HCI_DATA_REORDER);
- hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
- hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
- hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
- hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
- hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
-
- if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), TRUE);
-
- if (attr->pa_granularity) {
- exynos_ufs_enable_dbg_mode(hba);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY),
- attr->pa_granularity);
- exynos_ufs_disable_dbg_mode(hba);
-
- if (attr->pa_tactivate)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- attr->pa_tactivate);
- if (attr->pa_hibern8time &&
- !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
- attr->pa_hibern8time);
- }
-
- if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
- if (!attr->pa_granularity)
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
- &attr->pa_granularity);
- if (!attr->pa_hibern8time)
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
- &attr->pa_hibern8time);
- /*
- * do not wait for the HIBERN8 time when exiting hibernation
- */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0);
-
- if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
- /* Valid range for granularity: 1 ~ 6 */
- dev_warn(hba->dev,
- "%s: pa_granularity %d is invalid, assuming backwards compatibility\n",
- __func__,
- attr->pa_granularity);
- attr->pa_granularity = 6;
- }
- }
-
- phy_calibrate(generic_phy);
-
- if (ufs->drv_data->post_link)
- ufs->drv_data->post_link(ufs);
-
- return 0;
-}
-
-static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
-{
- struct device_node *np = dev->of_node;
- struct exynos_ufs_uic_attr *attr;
- int ret = 0;
-
- ufs->drv_data = device_get_match_data(dev);
-
- if (ufs->drv_data && ufs->drv_data->uic_attr) {
- attr = ufs->drv_data->uic_attr;
- } else {
- dev_err(dev, "failed to get uic attributes\n");
- ret = -EINVAL;
- goto out;
- }
-
- ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
- if (IS_ERR(ufs->sysreg))
- ufs->sysreg = NULL;
- else {
- if (of_property_read_u32_index(np, "samsung,sysreg", 1,
- &ufs->shareability_reg_offset)) {
- dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
- ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
- }
- }
-
- ufs->pclk_avail_min = PCLK_AVAIL_MIN;
- ufs->pclk_avail_max = PCLK_AVAIL_MAX;
-
- attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
- attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
- attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
- attr->pa_granularity = PA_GRANULARITY_VAL;
- attr->pa_tactivate = PA_TACTIVATE_VAL;
- attr->pa_hibern8time = PA_HIBERN8TIME_VAL;
-
-out:
- return ret;
-}
-
-static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
- struct exynos_ufs *ufs)
-{
- ufs->hba = hba;
- ufs->opts = ufs->drv_data->opts;
- ufs->rx_sel_idx = PA_MAXDATALANES;
- if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
- ufs->rx_sel_idx = 0;
- hba->priv = (void *)ufs;
- hba->quirks = ufs->drv_data->quirks;
-}
-
-static int exynos_ufs_init(struct ufs_hba *hba)
-{
- struct device *dev = hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_ufs *ufs;
- int ret;
-
- ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
- if (!ufs)
- return -ENOMEM;
-
- /* exynos-specific hci */
- ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
- if (IS_ERR(ufs->reg_hci)) {
- dev_err(dev, "cannot ioremap for hci vendor register\n");
- return PTR_ERR(ufs->reg_hci);
- }
-
- /* unipro */
- ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
- if (IS_ERR(ufs->reg_unipro)) {
- dev_err(dev, "cannot ioremap for unipro register\n");
- return PTR_ERR(ufs->reg_unipro);
- }
-
- /* ufs protector */
- ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
- if (IS_ERR(ufs->reg_ufsp)) {
- dev_err(dev, "cannot ioremap for ufs protector register\n");
- return PTR_ERR(ufs->reg_ufsp);
- }
-
- ret = exynos_ufs_parse_dt(dev, ufs);
- if (ret) {
- dev_err(dev, "failed to get dt info.\n");
- goto out;
- }
-
- ufs->phy = devm_phy_get(dev, "ufs-phy");
- if (IS_ERR(ufs->phy)) {
- ret = PTR_ERR(ufs->phy);
- dev_err(dev, "failed to get ufs-phy\n");
- goto out;
- }
-
- ret = phy_power_on(ufs->phy);
- if (ret)
- goto phy_off;
-
- exynos_ufs_priv_init(hba, ufs);
-
- if (ufs->drv_data->drv_init) {
- ret = ufs->drv_data->drv_init(dev, ufs);
- if (ret) {
- dev_err(dev, "failed to init drv-data\n");
- goto out;
- }
- }
-
- ret = exynos_ufs_get_clk_info(ufs);
- if (ret)
- goto out;
- exynos_ufs_specify_phy_time_attr(ufs);
- exynos_ufs_config_smu(ufs);
- return 0;
-
-phy_off:
- phy_power_off(ufs->phy);
-out:
- hba->priv = NULL;
- return ret;
-}
-
-static int exynos_ufs_host_reset(struct ufs_hba *hba)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- unsigned long timeout = jiffies + msecs_to_jiffies(1);
- u32 val;
- int ret = 0;
-
- exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
-
- hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);
-
- do {
- if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
- goto out;
- } while (time_before(jiffies, timeout));
-
- dev_err(hba->dev, "timeout host sw-reset\n");
- ret = -ETIMEDOUT;
-
-out:
- exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
- return ret;
-}
-
-static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
- hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
- udelay(5);
- hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
-}
-
-static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
-
- if (!enter) {
- if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
- exynos_ufs_disable_auto_ctrl_hcc(ufs);
- exynos_ufs_ungate_clks(ufs);
-
- if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
- static const unsigned int granularity_tbl[] = {
- 1, 4, 8, 16, 32, 100
- };
- int h8_time = attr->pa_hibern8time *
- granularity_tbl[attr->pa_granularity - 1];
- unsigned long us;
- s64 delta;
-
- do {
- delta = h8_time - ktime_us_delta(ktime_get(),
- ufs->entry_hibern8_t);
- if (delta <= 0)
- break;
-
- us = min_t(s64, delta, USEC_PER_MSEC);
- if (us >= 10)
- usleep_range(us, us + 10);
- } while (1);
- }
- }
-}
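With the defaults from ufs-exynos.h below (PA_HIBERN8TIME_VAL = 0x20, PA_GRANULARITY_VAL = 6), the wait above evaluates to:

/*
 * h8_time = 0x20 * granularity_tbl[6 - 1] = 32 * 100 = 3200 us,
 * so the loop sleeps in chunks of at most 1 ms until 3.2 ms have
 * elapsed since entry_hibern8_t before hibern8 exit proceeds.
 */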
-
-static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
- if (!enter) {
- u32 cur_mode = 0;
- u32 pwrmode;
-
- if (ufshcd_is_hs_mode(&ufs->dev_req_params))
- pwrmode = FAST_MODE;
- else
- pwrmode = SLOW_MODE;
-
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
- if (cur_mode != (pwrmode << 4 | pwrmode)) {
- dev_warn(hba->dev, "%s: power mode change\n", __func__);
- hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
- hba->pwr_info.pwr_tx = cur_mode & 0xf;
- ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- }
-
- if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
- exynos_ufs_establish_connt(ufs);
- } else {
- ufs->entry_hibern8_t = ktime_get();
- exynos_ufs_gate_clks(ufs);
- if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
- exynos_ufs_enable_auto_ctrl_hcc(ufs);
- }
-}
-
-static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- int ret = 0;
-
- switch (status) {
- case PRE_CHANGE:
- if (ufs->drv_data->pre_hce_enable) {
- ret = ufs->drv_data->pre_hce_enable(ufs);
- if (ret)
- return ret;
- }
-
- ret = exynos_ufs_host_reset(hba);
- if (ret)
- return ret;
- exynos_ufs_dev_hw_reset(hba);
- break;
- case POST_CHANGE:
- exynos_ufs_calc_pwm_clk_div(ufs);
- if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
- exynos_ufs_enable_auto_ctrl_hcc(ufs);
-
- if (ufs->drv_data->post_hce_enable)
- ret = ufs->drv_data->post_hce_enable(ufs);
-
- break;
- }
-
- return ret;
-}
-
-static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int ret = 0;
-
- switch (status) {
- case PRE_CHANGE:
- ret = exynos_ufs_pre_link(hba);
- break;
- case POST_CHANGE:
- ret = exynos_ufs_post_link(hba);
- break;
- }
-
- return ret;
-}
-
-static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- int ret = 0;
-
- switch (status) {
- case PRE_CHANGE:
- ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
- dev_req_params);
- break;
- case POST_CHANGE:
- ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
- break;
- }
-
- return ret;
-}
-
-static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
- enum uic_cmd_dme enter,
- enum ufs_notify_change_status notify)
-{
- switch ((u8)notify) {
- case PRE_CHANGE:
- exynos_ufs_pre_hibern8(hba, enter);
- break;
- case POST_CHANGE:
- exynos_ufs_post_hibern8(hba, enter);
- break;
- }
-}
-
-static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
- enum ufs_notify_change_status status)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
- if (status == PRE_CHANGE)
- return 0;
-
- if (!ufshcd_is_link_active(hba))
- phy_power_off(ufs->phy);
-
- return 0;
-}
-
-static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
- if (!ufshcd_is_link_active(hba))
- phy_power_on(ufs->phy);
-
- exynos_ufs_config_smu(ufs);
-
- return 0;
-}
-
-static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- if (status == POST_CHANGE) {
- ufshcd_set_link_active(hba);
- ufshcd_set_ufs_dev_active(hba);
- }
-
- return 0;
-}
-
-static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba)
-{
- u32 mbox;
- ktime_t start, stop;
-
- start = ktime_get();
- stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS));
-
- do {
- mbox = ufshcd_readl(hba, PH2VH_MBOX);
- /* TODO: Mailbox message protocols between the PH and VHs are
- * not implemented yet. This will be supported later
- */
- if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY)
- return 0;
-
- usleep_range(40, 50);
- } while (ktime_before(ktime_get(), stop));
-
- return -ETIME;
-}
-
-static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
-{
- struct device *dev = hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct exynos_ufs *ufs;
- int ret;
-
- ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
- if (!ufs)
- return -ENOMEM;
-
- /* exynos-specific hci */
- ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
- if (IS_ERR(ufs->reg_hci)) {
- dev_err(dev, "cannot ioremap for hci vendor register\n");
- return PTR_ERR(ufs->reg_hci);
- }
-
- ret = exynosauto_ufs_vh_wait_ph_ready(hba);
- if (ret)
- return ret;
-
- ufs->drv_data = device_get_match_data(dev);
- if (!ufs->drv_data)
- return -ENODEV;
-
- exynos_ufs_priv_init(hba, ufs);
-
- return 0;
-}
-
-static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
- .name = "exynos_ufs",
- .init = exynos_ufs_init,
- .hce_enable_notify = exynos_ufs_hce_enable_notify,
- .link_startup_notify = exynos_ufs_link_startup_notify,
- .pwr_change_notify = exynos_ufs_pwr_change_notify,
- .setup_clocks = exynos_ufs_setup_clocks,
- .setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
- .setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req,
- .hibern8_notify = exynos_ufs_hibern8_notify,
- .suspend = exynos_ufs_suspend,
- .resume = exynos_ufs_resume,
-};
-
-static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
- .name = "exynosauto_ufs_vh",
- .init = exynosauto_ufs_vh_init,
- .link_startup_notify = exynosauto_ufs_vh_link_startup_notify,
-};
-
-static int exynos_ufs_probe(struct platform_device *pdev)
-{
- int err;
- struct device *dev = &pdev->dev;
- const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops;
- const struct exynos_ufs_drv_data *drv_data =
- device_get_match_data(dev);
-
- if (drv_data && drv_data->vops)
- vops = drv_data->vops;
-
- err = ufshcd_pltfrm_init(pdev, vops);
- if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
-
- return err;
-}
-
-static int exynos_ufs_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&pdev->dev);
- ufshcd_remove(hba);
- return 0;
-}
-
-static struct exynos_ufs_uic_attr exynos7_uic_attr = {
- .tx_trailingclks = 0x10,
- .tx_dif_p_nsec = 3000000, /* unit: ns */
- .tx_dif_n_nsec = 1000000, /* unit: ns */
- .tx_high_z_cnt_nsec = 20000, /* unit: ns */
- .tx_base_unit_nsec = 100000, /* unit: ns */
- .tx_gran_unit_nsec = 4000, /* unit: ns */
- .tx_sleep_cnt = 1000, /* unit: ns */
- .tx_min_activatetime = 0xa,
- .rx_filler_enable = 0x2,
- .rx_dif_p_nsec = 1000000, /* unit: ns */
- .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
- .rx_base_unit_nsec = 100000, /* unit: ns */
- .rx_gran_unit_nsec = 4000, /* unit: ns */
- .rx_sleep_cnt = 1280, /* unit: ns */
- .rx_stall_cnt = 320, /* unit: ns */
- .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
- .pa_dbg_option_suite = 0x30103,
-};
-
-static struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
- .uic_attr = &exynos7_uic_attr,
- .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
- UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
- UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
- UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
- .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
- EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
- EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
- .drv_init = exynosauto_ufs_drv_init,
- .post_hce_enable = exynosauto_ufs_post_hce_enable,
- .pre_link = exynosauto_ufs_pre_link,
- .pre_pwr_change = exynosauto_ufs_pre_pwr_change,
- .post_pwr_change = exynosauto_ufs_post_pwr_change,
-};
-
-static struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
- .vops = &ufs_hba_exynosauto_vh_ops,
- .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
- UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
- UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
- UFSHCI_QUIRK_BROKEN_HCE |
- UFSHCD_QUIRK_BROKEN_UIC_CMD |
- UFSHCD_QUIRK_SKIP_PH_CONFIGURATION |
- UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
- .opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
-};
-
-static struct exynos_ufs_drv_data exynos_ufs_drvs = {
- .uic_attr = &exynos7_uic_attr,
- .quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
- UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
- UFSHCI_QUIRK_BROKEN_HCE |
- UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
- UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
- UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
- UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
- UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
- .opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
- EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
- EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
- EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
- EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
- .drv_init = exynos7_ufs_drv_init,
- .pre_link = exynos7_ufs_pre_link,
- .post_link = exynos7_ufs_post_link,
- .pre_pwr_change = exynos7_ufs_pre_pwr_change,
- .post_pwr_change = exynos7_ufs_post_pwr_change,
-};
-
-static const struct of_device_id exynos_ufs_of_match[] = {
- { .compatible = "samsung,exynos7-ufs",
- .data = &exynos_ufs_drvs },
- { .compatible = "samsung,exynosautov9-ufs",
- .data = &exynosauto_ufs_drvs },
- { .compatible = "samsung,exynosautov9-ufs-vh",
- .data = &exynosauto_ufs_vh_drvs },
- {},
-};
-
-static const struct dev_pm_ops exynos_ufs_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
- .prepare = ufshcd_suspend_prepare,
- .complete = ufshcd_resume_complete,
-};
-
-static struct platform_driver exynos_ufs_pltform = {
- .probe = exynos_ufs_probe,
- .remove = exynos_ufs_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "exynos-ufshc",
- .pm = &exynos_ufs_pm_ops,
- .of_match_table = of_match_ptr(exynos_ufs_of_match),
- },
-};
-module_platform_driver(exynos_ufs_pltform);
-
-MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
-MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>");
-MODULE_DESCRIPTION("Exynos UFS HCI Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/ufs/ufs-exynos.h b/drivers/scsi/ufs/ufs-exynos.h
deleted file mode 100644
index 1c33e5466082..000000000000
--- a/drivers/scsi/ufs/ufs-exynos.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UFS Host Controller driver for Exynos specific extensions
- *
- * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
- *
- */
-
-#ifndef _UFS_EXYNOS_H_
-#define _UFS_EXYNOS_H_
-
-/*
- * UNIPRO registers
- */
-#define UNIPRO_DBG_FORCE_DME_CTRL_STATE 0x150
-
-/*
- * MIBs for PA debug registers
- */
-#define PA_DBG_CLK_PERIOD 0x9514
-#define PA_DBG_TXPHY_CFGUPDT 0x9518
-#define PA_DBG_RXPHY_CFGUPDT 0x9519
-#define PA_DBG_MODE 0x9529
-#define PA_DBG_SKIP_RESET_PHY 0x9539
-#define PA_DBG_OV_TM 0x9540
-#define PA_DBG_SKIP_LINE_RESET 0x9541
-#define PA_DBG_LINE_RESET_REQ 0x9543
-#define PA_DBG_OPTION_SUITE 0x9564
-#define PA_DBG_OPTION_SUITE_DYN 0x9565
-
-/*
- * MIBs for Transport Layer debug registers
- */
-#define T_DBG_SKIP_INIT_HIBERN8_EXIT 0xc001
-
-/*
- * Exynos MPHY attributes
- */
-#define TX_LINERESET_N_VAL 0x0277
-#define TX_LINERESET_N(v) (((v) >> 10) & 0xFF)
-#define TX_LINERESET_P_VAL 0x027D
-#define TX_LINERESET_P(v) (((v) >> 12) & 0xFF)
-#define TX_OV_SLEEP_CNT_TIMER 0x028E
-#define TX_OV_H8_ENTER_EN (1 << 7)
-#define TX_OV_SLEEP_CNT(v) (((v) >> 5) & 0x7F)
-#define TX_HIGH_Z_CNT_11_08 0x028C
-#define TX_HIGH_Z_CNT_H(v) (((v) >> 8) & 0xF)
-#define TX_HIGH_Z_CNT_07_00 0x028D
-#define TX_HIGH_Z_CNT_L(v) ((v) & 0xFF)
-#define TX_BASE_NVAL_07_00 0x0293
-#define TX_BASE_NVAL_L(v) ((v) & 0xFF)
-#define TX_BASE_NVAL_15_08 0x0294
-#define TX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF)
-#define TX_GRAN_NVAL_07_00 0x0295
-#define TX_GRAN_NVAL_L(v) ((v) & 0xFF)
-#define TX_GRAN_NVAL_10_08 0x0296
-#define TX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3)
-
-#define VND_TX_CLK_PRD 0xAA
-#define VND_TX_CLK_PRD_EN 0xA9
-#define VND_TX_LINERESET_PVALUE0 0xAD
-#define VND_TX_LINERESET_PVALUE1 0xAC
-#define VND_TX_LINERESET_PVALUE2 0xAB
-
-#define TX_LINE_RESET_TIME 3200
-
-#define VND_RX_CLK_PRD 0x12
-#define VND_RX_CLK_PRD_EN 0x11
-#define VND_RX_LINERESET_VALUE0 0x1D
-#define VND_RX_LINERESET_VALUE1 0x1C
-#define VND_RX_LINERESET_VALUE2 0x1B
-
-#define RX_LINE_RESET_TIME 1000
-
-#define RX_FILLER_ENABLE 0x0316
-#define RX_FILLER_EN (1 << 1)
-#define RX_LINERESET_VAL 0x0317
-#define RX_LINERESET(v) (((v) >> 12) & 0xFF)
-#define RX_LCC_IGNORE 0x0318
-#define RX_SYNC_MASK_LENGTH 0x0321
-#define RX_HIBERN8_WAIT_VAL_BIT_20_16 0x0331
-#define RX_HIBERN8_WAIT_VAL_BIT_15_08 0x0332
-#define RX_HIBERN8_WAIT_VAL_BIT_07_00 0x0333
-#define RX_OV_SLEEP_CNT_TIMER 0x0340
-#define RX_OV_SLEEP_CNT(v) (((v) >> 6) & 0x1F)
-#define RX_OV_STALL_CNT_TIMER 0x0341
-#define RX_OV_STALL_CNT(v) (((v) >> 4) & 0xFF)
-#define RX_BASE_NVAL_07_00 0x0355
-#define RX_BASE_NVAL_L(v) ((v) & 0xFF)
-#define RX_BASE_NVAL_15_08 0x0354
-#define RX_BASE_NVAL_H(v) (((v) >> 8) & 0xFF)
-#define RX_GRAN_NVAL_07_00 0x0353
-#define RX_GRAN_NVAL_L(v) ((v) & 0xFF)
-#define RX_GRAN_NVAL_10_08 0x0352
-#define RX_GRAN_NVAL_H(v) (((v) >> 8) & 0x3)
-
-#define CMN_PWM_CLK_CTRL 0x0402
-#define PWM_CLK_CTRL_MASK 0x3
-
-#define IATOVAL_NSEC 20000 /* unit: ns */
-#define UNIPRO_PCLK_PERIOD(ufs) (NSEC_PER_SEC / ufs->pclk_rate)
-
-struct exynos_ufs;
-
-/* vendor specific pre-defined parameters */
-#define SLOW 1
-#define FAST 2
-
-#define RX_ADV_FINE_GRAN_SUP_EN 0x1
-#define RX_ADV_FINE_GRAN_STEP_VAL 0x3
-#define RX_ADV_MIN_ACTV_TIME_CAP 0x9
-
-#define PA_GRANULARITY_VAL 0x6
-#define PA_TACTIVATE_VAL 0x3
-#define PA_HIBERN8TIME_VAL 0x20
-
-#define PCLK_AVAIL_MIN 70000000
-#define PCLK_AVAIL_MAX 167000000
-
-struct exynos_ufs_uic_attr {
- /* TX Attributes */
- unsigned int tx_trailingclks;
- unsigned int tx_dif_p_nsec;
- unsigned int tx_dif_n_nsec;
- unsigned int tx_high_z_cnt_nsec;
- unsigned int tx_base_unit_nsec;
- unsigned int tx_gran_unit_nsec;
- unsigned int tx_sleep_cnt;
- unsigned int tx_min_activatetime;
- /* RX Attributes */
- unsigned int rx_filler_enable;
- unsigned int rx_dif_p_nsec;
- unsigned int rx_hibern8_wait_nsec;
- unsigned int rx_base_unit_nsec;
- unsigned int rx_gran_unit_nsec;
- unsigned int rx_sleep_cnt;
- unsigned int rx_stall_cnt;
- unsigned int rx_hs_g1_sync_len_cap;
- unsigned int rx_hs_g2_sync_len_cap;
- unsigned int rx_hs_g3_sync_len_cap;
- unsigned int rx_hs_g1_prep_sync_len_cap;
- unsigned int rx_hs_g2_prep_sync_len_cap;
- unsigned int rx_hs_g3_prep_sync_len_cap;
- /* Common Attributes */
- unsigned int cmn_pwm_clk_ctrl;
- /* Internal Attributes */
- unsigned int pa_dbg_option_suite;
- /* Changeable Attributes */
- unsigned int rx_adv_fine_gran_sup_en;
- unsigned int rx_adv_fine_gran_step;
- unsigned int rx_min_actv_time_cap;
- unsigned int rx_hibern8_time_cap;
- unsigned int rx_adv_min_actv_time_cap;
- unsigned int rx_adv_hibern8_time_cap;
- unsigned int pa_granularity;
- unsigned int pa_tactivate;
- unsigned int pa_hibern8time;
-};
-
-struct exynos_ufs_drv_data {
- const struct ufs_hba_variant_ops *vops;
- struct exynos_ufs_uic_attr *uic_attr;
- unsigned int quirks;
- unsigned int opts;
- /* SoC's specific operations */
- int (*drv_init)(struct device *dev, struct exynos_ufs *ufs);
- int (*pre_link)(struct exynos_ufs *ufs);
- int (*post_link)(struct exynos_ufs *ufs);
- int (*pre_pwr_change)(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr);
- int (*post_pwr_change)(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr);
- int (*pre_hce_enable)(struct exynos_ufs *ufs);
- int (*post_hce_enable)(struct exynos_ufs *ufs);
-};
-
-struct ufs_phy_time_cfg {
- u32 tx_linereset_p;
- u32 tx_linereset_n;
- u32 tx_high_z_cnt;
- u32 tx_base_n_val;
- u32 tx_gran_n_val;
- u32 tx_sleep_cnt;
- u32 rx_linereset;
- u32 rx_hibern8_wait;
- u32 rx_base_n_val;
- u32 rx_gran_n_val;
- u32 rx_sleep_cnt;
- u32 rx_stall_cnt;
-};
-
-struct exynos_ufs {
- struct ufs_hba *hba;
- struct phy *phy;
- void __iomem *reg_hci;
- void __iomem *reg_unipro;
- void __iomem *reg_ufsp;
- struct clk *clk_hci_core;
- struct clk *clk_unipro_main;
- struct clk *clk_apb;
- u32 pclk_rate;
- u32 pclk_div;
- u32 pclk_avail_min;
- u32 pclk_avail_max;
- unsigned long mclk_rate;
- int avail_ln_rx;
- int avail_ln_tx;
- int rx_sel_idx;
- struct ufs_pa_layer_attr dev_req_params;
- struct ufs_phy_time_cfg t_cfg;
- ktime_t entry_hibern8_t;
- const struct exynos_ufs_drv_data *drv_data;
- struct regmap *sysreg;
- u32 shareability_reg_offset;
-
- u32 opts;
-#define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL BIT(0)
-#define EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB BIT(1)
-#define EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL BIT(2)
-#define EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX BIT(3)
-#define EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER BIT(4)
-#define EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR BIT(5)
-};
-
-#define for_each_ufs_rx_lane(ufs, i) \
- for (i = (ufs)->rx_sel_idx; \
- i < (ufs)->rx_sel_idx + (ufs)->avail_ln_rx; i++)
-#define for_each_ufs_tx_lane(ufs, i) \
- for (i = 0; i < (ufs)->avail_ln_tx; i++)
-
-#define EXYNOS_UFS_MMIO_FUNC(name) \
-static inline void name##_writel(struct exynos_ufs *ufs, u32 val, u32 reg)\
-{ \
- writel(val, ufs->reg_##name + reg); \
-} \
- \
-static inline u32 name##_readl(struct exynos_ufs *ufs, u32 reg) \
-{ \
- return readl(ufs->reg_##name + reg); \
-}
-
-EXYNOS_UFS_MMIO_FUNC(hci);
-EXYNOS_UFS_MMIO_FUNC(unipro);
-EXYNOS_UFS_MMIO_FUNC(ufsp);
-#undef EXYNOS_UFS_MMIO_FUNC
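For reference, EXYNOS_UFS_MMIO_FUNC(hci) expands to the accessor pair below; the unipro and ufsp variants are identical apart from the register base:

static inline void hci_writel(struct exynos_ufs *ufs, u32 val, u32 reg)
{
	writel(val, ufs->reg_hci + reg);
}

static inline u32 hci_readl(struct exynos_ufs *ufs, u32 reg)
{
	return readl(ufs->reg_hci + reg);
}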
-
-long exynos_ufs_calc_time_cntr(struct exynos_ufs *, long);
-
-static inline void exynos_ufs_enable_ov_tm(struct ufs_hba *hba)
-{
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), TRUE);
-}
-
-static inline void exynos_ufs_disable_ov_tm(struct ufs_hba *hba)
-{
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OV_TM), FALSE);
-}
-
-static inline void exynos_ufs_enable_dbg_mode(struct ufs_hba *hba)
-{
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), TRUE);
-}
-
-static inline void exynos_ufs_disable_dbg_mode(struct ufs_hba *hba)
-{
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), FALSE);
-}
-
-#endif /* _UFS_EXYNOS_H_ */
diff --git a/drivers/scsi/ufs/ufs-fault-injection.c b/drivers/scsi/ufs/ufs-fault-injection.c
deleted file mode 100644
index 7ac7c4e7ff83..000000000000
--- a/drivers/scsi/ufs/ufs-fault-injection.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/kconfig.h>
-#include <linux/types.h>
-#include <linux/fault-inject.h>
-#include <linux/module.h>
-#include "ufs-fault-injection.h"
-
-static int ufs_fault_get(char *buffer, const struct kernel_param *kp);
-static int ufs_fault_set(const char *val, const struct kernel_param *kp);
-
-static const struct kernel_param_ops ufs_fault_ops = {
- .get = ufs_fault_get,
- .set = ufs_fault_set,
-};
-
-enum { FAULT_INJ_STR_SIZE = 80 };
-
-/*
- * For more details about fault injection, please refer to
- * Documentation/fault-injection/fault-injection.rst.
- */
-static char g_trigger_eh_str[FAULT_INJ_STR_SIZE];
-module_param_cb(trigger_eh, &ufs_fault_ops, g_trigger_eh_str, 0644);
-MODULE_PARM_DESC(trigger_eh,
- "Fault injection. trigger_eh=<interval>,<probability>,<space>,<times>");
-static DECLARE_FAULT_ATTR(ufs_trigger_eh_attr);
-
-static char g_timeout_str[FAULT_INJ_STR_SIZE];
-module_param_cb(timeout, &ufs_fault_ops, g_timeout_str, 0644);
-MODULE_PARM_DESC(timeout,
- "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
-static DECLARE_FAULT_ATTR(ufs_timeout_attr);
-
-static int ufs_fault_get(char *buffer, const struct kernel_param *kp)
-{
- const char *fault_str = kp->arg;
-
- return sysfs_emit(buffer, "%s\n", fault_str);
-}
-
-static int ufs_fault_set(const char *val, const struct kernel_param *kp)
-{
- struct fault_attr *attr = NULL;
-
- if (kp->arg == g_trigger_eh_str)
- attr = &ufs_trigger_eh_attr;
- else if (kp->arg == g_timeout_str)
- attr = &ufs_timeout_attr;
-
- if (WARN_ON_ONCE(!attr))
- return -EINVAL;
-
- if (!setup_fault_attr(attr, (char *)val))
- return -EINVAL;
-
- strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE);
-
- return 0;
-}
-
-bool ufs_trigger_eh(void)
-{
- return should_fail(&ufs_trigger_eh_attr, 1);
-}
-
-bool ufs_fail_completion(void)
-{
- return should_fail(&ufs_timeout_attr, 1);
-}
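Both attributes take the standard fault-injection tuple documented in the MODULE_PARM_DESC strings above. A usage sketch follows; the sysfs path is an assumption, since the exact module name depends on how ufshcd is built:

/*
 *   echo "0,100,0,3" > /sys/module/ufshcd_core/parameters/trigger_eh
 *
 * interval=0, probability=100, space=0, times=3: the next three calls
 * to ufs_trigger_eh() return true, forcing the UFS error handler to
 * run three times.
 */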
diff --git a/drivers/scsi/ufs/ufs-fault-injection.h b/drivers/scsi/ufs/ufs-fault-injection.h
deleted file mode 100644
index 6d0cd8e10c87..000000000000
--- a/drivers/scsi/ufs/ufs-fault-injection.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _UFS_FAULT_INJECTION_H
-#define _UFS_FAULT_INJECTION_H
-
-#include <linux/kconfig.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION
-bool ufs_trigger_eh(void);
-bool ufs_fail_completion(void);
-#else
-static inline bool ufs_trigger_eh(void)
-{
- return false;
-}
-
-static inline bool ufs_fail_completion(void)
-{
- return false;
-}
-#endif
-
-#endif /* _UFS_FAULT_INJECTION_H */
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
deleted file mode 100644
index 8c7e8d321746..000000000000
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ /dev/null
@@ -1,599 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HiSilicon Hixxxx UFS Driver
- *
- * Copyright (c) 2016-2017 Linaro Ltd.
- * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
- */
-
-#include <linux/time.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/reset.h>
-
-#include "ufshcd.h"
-#include "ufshcd-pltfrm.h"
-#include "unipro.h"
-#include "ufs-hisi.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
-
-static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
-{
- int err = 0;
- u32 tx_fsm_val_0 = 0;
- u32 tx_fsm_val_1 = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
-
- do {
- err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
- &tx_fsm_val_0);
- err |= ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
- if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
- tx_fsm_val_1 == TX_FSM_HIBERN8))
- break;
-
- /* sleep for max. 200us */
- usleep_range(100, 200);
- } while (time_before(jiffies, timeout));
-
- /*
- * we might have scheduled out for long during polling so
- * check the state again.
- */
- if (time_after(jiffies, timeout)) {
- err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
- &tx_fsm_val_0);
- err |= ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
- }
-
- if (err) {
- dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
- __func__, err);
- } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
- tx_fsm_val_1 != TX_FSM_HIBERN8) {
- err = -1;
- dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
- __func__, tx_fsm_val_0, tx_fsm_val_1);
- }
-
- return err;
-}
-
-static void ufs_hisi_clk_init(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
- if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
- mdelay(1);
- /* use abb clk */
- ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
- ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
- /* open mphy ref clk */
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
-}
-
-static void ufs_hisi_soc_init(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
- u32 reg;
-
- if (!IS_ERR(host->rst))
- reset_control_assert(host->rst);
-
- /* HC_PSW powerup */
- ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
- udelay(10);
- /* notify PWR ready */
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
- ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
- UFS_DEVICE_RESET_CTRL);
-
- reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
- reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
- /* set cfg clk freq */
- ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
- /* set ref clk freq */
- ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
- /* bypass ufs clk gate */
- ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
- CLOCK_GATE_BYPASS);
- ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
-
- /* open psw clk */
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
- /* disable ufshc iso */
- ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
- /* disable phy iso */
- ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
- /* signal that isolation is disabled */
- ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
-
- /* disable lp_reset_n */
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
- mdelay(1);
-
- ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
- UFS_DEVICE_RESET_CTRL);
-
- msleep(20);
-
- /*
- * enable the linereset recovery fix and the rx_reset/tx_reset beat;
- * enable the ref_clk_en override (bit 5) with an override value
- * of 1 (bit 4), applied with the corresponding mask bits
- */
- ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
-
- if (!IS_ERR(host->rst))
- reset_control_deassert(host->rst);
-}
-
-static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
- int err;
- uint32_t value;
- uint32_t reg;
-
- /* Unipro VS_mphy_disable */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
- /* PA_HSSeries */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
- /* MPHY CBRATESEL */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
- /* MPHY CBOVRCTRL2 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
- /* MPHY CBOVRCTRL3 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
-
- if (host->caps & UFS_HISI_CAP_PHY10nm) {
- /* MPHY CBOVRCTRL4 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
- /* MPHY CBOVRCTRL5 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
- }
-
- /* Unipro VS_MphyCfgUpdt */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
- /* MPHY RXOVRCTRL4 rx0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
- /* MPHY RXOVRCTRL4 rx1 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
- /* MPHY RXOVRCTRL5 rx0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
- /* MPHY RXOVRCTRL5 rx1 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
- /* MPHY RXSQCONTROL rx0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
- /* MPHY RXSQCONTROL rx1 */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
- /* Unipro VS_MphyCfgUpdt */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
-
- if (host->caps & UFS_HISI_CAP_PHY10nm) {
- /* RX_Hibern8Time_Capability */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
- /* RX_Hibern8Time_Capability */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
- /* RX_Min_ActivateTime */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
- /* RX_Min_ActivateTime */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
- } else {
- /* Tactive RX */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
- /* Tactive RX */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
- }
-
- /* Gear3 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
- /* Gear3 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
- /* Gear2 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
- /* Gear2 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
- /* Gear1 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
- /* Gear1 Synclength */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
- /* Thibernate Tx */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
- /* Thibernate Tx */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
- /* Unipro VS_mphy_disable */
- ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
- if (value != 0x1)
- dev_info(hba->dev,
- "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);
-
- /* Unipro VS_mphy_disable */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
- err = ufs_hisi_check_hibern8(hba);
- if (err)
- dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
-
- if (!(host->caps & UFS_HISI_CAP_PHY10nm))
- ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
-
- /* disable auto H8 */
- reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
- reg = reg & (~UFS_AHIT_AH8ITV_MASK);
- ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
-
- /* Unipro PA_Local_TX_LCC_Enable */
- ufshcd_disable_host_tx_lcc(hba);
- /* close Unipro VS_Mk2ExtnSupport */
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
- ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
- if (value != 0) {
- /* verify that the close actually took effect */
- dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
- }
-
- return err;
-}
-
-static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- /* Unipro DL_AFC0CreditThreshold */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
- /* Unipro DL_TC0OutAckThreshold */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
- /* Unipro DL_TC0TXFCThreshold */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
-
- /* not bypass ufs clk gate */
- ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
- CLOCK_GATE_BYPASS);
- ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
- UFS_SYSCTRL);
-
- /* select received symbol cnt */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
- /* reset counter0 and enable */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
-
- return 0;
-}
-
-static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int err = 0;
-
- switch (status) {
- case PRE_CHANGE:
- err = ufs_hisi_link_startup_pre_change(hba);
- break;
- case POST_CHANGE:
- err = ufs_hisi_link_startup_post_change(hba);
- break;
- default:
- break;
- }
-
- return err;
-}
-
-static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
-{
- ufshcd_init_pwr_dev_param(hisi_param);
-}
-
-static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- if (host->caps & UFS_HISI_CAP_PHY10nm) {
- /*
- * the Boston platform needs SaveConfigTime set to 0x13
- * and the sync lengths raised to their maximum values
- */
- /* VS_DebugSaveConfigTime */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
- /* g1 sync length */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
- /* g2 sync length */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
- /* g3 sync length */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
- /* PA_Hibern8Time */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
- /* PA_Tactivate */
- ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
- ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
- }
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
- pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
- /* VS_DebugSaveConfigTime */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
- /* sync length */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
- }
-
- /* update */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
- /* PA_TxSkip */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
- /* PA_PWRModeUserData0 = 8191, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
- /* PA_PWRModeUserData1 = 65535, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
- /* PA_PWRModeUserData2 = 32767, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
- /* DME_FC0ProtectionTimeOutVal = 8191, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
- /* DME_TC0ReplayTimeOutVal = 65535, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
- /* DME_AFC0ReqTimeOutVal = 32767, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
- /* PA_PWRModeUserData3 = 8191, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
- /* PA_PWRModeUserData4 = 65535, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
- /* PA_PWRModeUserData5 = 32767, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
- /* DME_FC1ProtectionTimeOutVal = 8191, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
- /* DME_TC1ReplayTimeOutVal = 65535, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
- /* DME_AFC1ReqTimeOutVal = 32767, default is 0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
-}
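The repeated 8191/65535/32767 values look like field maxima rather than tuned timeouts (an inference from the values themselves; the driver does not say so):

/*
 * 8191  = 2^13 - 1  (protection timeouts)
 * 65535 = 2^16 - 1  (replay timeouts)
 * 32767 = 2^15 - 1  (AFC request timeouts)
 */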
-
-static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- struct ufs_dev_params ufs_hisi_cap;
- int ret = 0;
-
- if (!dev_req_params) {
- dev_err(hba->dev,
- "%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- switch (status) {
- case PRE_CHANGE:
- ufs_hisi_set_dev_cap(&ufs_hisi_cap);
- ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
- dev_max_params, dev_req_params);
- if (ret) {
- dev_err(hba->dev,
- "%s: failed to determine capabilities\n", __func__);
- goto out;
- }
-
- ufs_hisi_pwr_change_pre_change(hba);
- break;
- case POST_CHANGE:
- break;
- default:
- ret = -EINVAL;
- break;
- }
-out:
- return ret;
-}
-
-static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
- enum ufs_notify_change_status status)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- if (status == PRE_CHANGE)
- return 0;
-
- if (pm_op == UFS_RUNTIME_PM)
- return 0;
-
- if (host->in_suspend) {
- WARN_ON(1);
- return 0;
- }
-
- ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
- udelay(10);
- /* set ref_dig_clk override of PHY PCS to 0 */
- ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
-
- host->in_suspend = true;
-
- return 0;
-}
-
-static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- struct ufs_hisi_host *host = ufshcd_get_variant(hba);
-
- if (!host->in_suspend)
- return 0;
-
- /* set ref_dig_clk override of PHY PCS to 1 */
- ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
- udelay(10);
- ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
-
- host->in_suspend = false;
- return 0;
-}
-
-static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
-{
- struct device *dev = host->hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
-
- /* get resource of ufs sys ctrl */
- host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
- return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
-}
-
-static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
-{
- hba->rpm_lvl = UFS_PM_LVL_1;
- hba->spm_lvl = UFS_PM_LVL_3;
-}
-
-/**
- * ufs_hisi_init_common
- * @hba: host controller instance
- */
-static int ufs_hisi_init_common(struct ufs_hba *hba)
-{
- int err = 0;
- struct device *dev = hba->dev;
- struct ufs_hisi_host *host;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host)
- return -ENOMEM;
-
- host->hba = hba;
- ufshcd_set_variant(hba, host);
-
- host->rst = devm_reset_control_get(dev, "rst");
- if (IS_ERR(host->rst)) {
- dev_err(dev, "%s: failed to get reset control\n", __func__);
- err = PTR_ERR(host->rst);
- goto error;
- }
-
- ufs_hisi_set_pm_lvl(hba);
-
- err = ufs_hisi_get_resource(host);
- if (err)
- goto error;
-
- return 0;
-
-error:
- ufshcd_set_variant(hba, NULL);
- return err;
-}
-
-static int ufs_hi3660_init(struct ufs_hba *hba)
-{
- int ret = 0;
- struct device *dev = hba->dev;
-
- ret = ufs_hisi_init_common(hba);
- if (ret) {
- dev_err(dev, "%s: ufs common init fail\n", __func__);
- return ret;
- }
-
- ufs_hisi_clk_init(hba);
-
- ufs_hisi_soc_init(hba);
-
- return 0;
-}
-
-static int ufs_hi3670_init(struct ufs_hba *hba)
-{
- int ret = 0;
- struct device *dev = hba->dev;
- struct ufs_hisi_host *host;
-
- ret = ufs_hisi_init_common(hba);
- if (ret) {
- dev_err(dev, "%s: ufs common init fail\n", __func__);
- return ret;
- }
-
- ufs_hisi_clk_init(hba);
-
- ufs_hisi_soc_init(hba);
-
- /* Add cap for 10nm PHY variant on HI3670 SoC */
- host = ufshcd_get_variant(hba);
- host->caps |= UFS_HISI_CAP_PHY10nm;
-
- return 0;
-}
-
-static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
- .name = "hi3660",
- .init = ufs_hi3660_init,
- .link_startup_notify = ufs_hisi_link_startup_notify,
- .pwr_change_notify = ufs_hisi_pwr_change_notify,
- .suspend = ufs_hisi_suspend,
- .resume = ufs_hisi_resume,
-};
-
-static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
- .name = "hi3670",
- .init = ufs_hi3670_init,
- .link_startup_notify = ufs_hisi_link_startup_notify,
- .pwr_change_notify = ufs_hisi_pwr_change_notify,
- .suspend = ufs_hisi_suspend,
- .resume = ufs_hisi_resume,
-};
-
-static const struct of_device_id ufs_hisi_of_match[] = {
- { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
- { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
-
-static int ufs_hisi_probe(struct platform_device *pdev)
-{
- const struct of_device_id *of_id;
-
- of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
-
- return ufshcd_pltfrm_init(pdev, of_id->data);
-}
-
-static int ufs_hisi_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- ufshcd_remove(hba);
- return 0;
-}
-
-static const struct dev_pm_ops ufs_hisi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
- .prepare = ufshcd_suspend_prepare,
- .complete = ufshcd_resume_complete,
-};
-
-static struct platform_driver ufs_hisi_pltform = {
- .probe = ufs_hisi_probe,
- .remove = ufs_hisi_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "ufshcd-hisi",
- .pm = &ufs_hisi_pm_ops,
- .of_match_table = of_match_ptr(ufs_hisi_of_match),
- },
-};
-module_platform_driver(ufs_hisi_pltform);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ufshcd-hisi");
-MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h
deleted file mode 100644
index 5a90c0f4e90c..000000000000
--- a/drivers/scsi/ufs/ufs-hisi.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2017, HiSilicon. All rights reserved.
- */
-
-#ifndef UFS_HISI_H_
-#define UFS_HISI_H_
-
-#define HBRN8_POLL_TOUT_MS 1000
-
-/*
- * ufs sysctrl specific define
- */
-#define PSW_POWER_CTRL (0x04)
-#define PHY_ISO_EN (0x08)
-#define HC_LP_CTRL (0x0C)
-#define PHY_CLK_CTRL (0x10)
-#define PSW_CLK_CTRL (0x14)
-#define CLOCK_GATE_BYPASS (0x18)
-#define RESET_CTRL_EN (0x1C)
-#define UFS_SYSCTRL (0x5C)
-#define UFS_DEVICE_RESET_CTRL (0x60)
-
-#define BIT_UFS_PSW_ISO_CTRL (1 << 16)
-#define BIT_UFS_PSW_MTCMOS_EN (1 << 0)
-#define BIT_UFS_REFCLK_ISO_EN (1 << 16)
-#define BIT_UFS_PHY_ISO_CTRL (1 << 0)
-#define BIT_SYSCTRL_LP_ISOL_EN (1 << 16)
-#define BIT_SYSCTRL_PWR_READY (1 << 8)
-#define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24)
-#define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8)
-#define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF)
-#define UFS_FREQ_CFG_CLK (0x39)
-#define BIT_SYSCTRL_PSW_CLK_EN (1 << 4)
-#define MASK_UFS_CLK_GATE_BYPASS (0x3F)
-#define BIT_SYSCTRL_LP_RESET_N (1 << 0)
-#define BIT_UFS_REFCLK_SRC_SEl (1 << 0)
-#define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16)
-#define MASK_UFS_DEVICE_RESET (0x1 << 16)
-#define BIT_UFS_DEVICE_RESET (0x1)
-
-/*
- * M-TX Configuration Attributes for Hixxxx
- */
-#define MPHY_TX_FSM_STATE 0x41
-#define TX_FSM_HIBERN8 0x1
-
-/*
- * Hixxxx UFS HC specific Registers
- */
-enum {
- UFS_REG_OCPTHRTL = 0xc0,
- UFS_REG_OOCPR = 0xc4,
-
- UFS_REG_CDACFG = 0xd0,
- UFS_REG_CDATX1 = 0xd4,
- UFS_REG_CDATX2 = 0xd8,
- UFS_REG_CDARX1 = 0xdc,
- UFS_REG_CDARX2 = 0xe0,
- UFS_REG_CDASTA = 0xe4,
-
- UFS_REG_LBMCFG = 0xf0,
- UFS_REG_LBMSTA = 0xf4,
- UFS_REG_UFSMODE = 0xf8,
-
- UFS_REG_HCLKDIV = 0xfc,
-};
-
-/* AHIT - Auto-Hibernate Idle Timer */
-#define UFS_AHIT_AH8ITV_MASK 0x3FF
-
-/* REG UFS_REG_OCPTHRTL definition */
-#define UFS_HCLKDIV_NORMAL_VALUE 0xE4
-
-/* vendor specific pre-defined parameters */
-#define SLOW 1
-#define FAST 2
-
-#define UFS_HISI_CAP_RESERVED BIT(0)
-#define UFS_HISI_CAP_PHY10nm BIT(1)
-
-struct ufs_hisi_host {
- struct ufs_hba *hba;
- void __iomem *ufs_sys_ctrl;
-
- struct reset_control *rst;
-
- uint64_t caps;
-
- bool in_suspend;
-};
-
-#define ufs_sys_ctrl_writel(host, val, reg) \
- writel((val), (host)->ufs_sys_ctrl + (reg))
-#define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg))
-#define ufs_sys_ctrl_set_bits(host, mask, reg) \
- ufs_sys_ctrl_writel( \
- (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg))
-#define ufs_sys_ctrl_clr_bits(host, mask, reg) \
- ufs_sys_ctrl_writel((host), \
- ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \
- (reg))
-
-#endif /* UFS_HISI_H_ */
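
The ufs_sys_ctrl_set_bits()/ufs_sys_ctrl_clr_bits() helpers above are plain read-modify-write wrappers around readl()/writel(). A minimal usage sketch, mirroring the suspend/resume paths earlier in this diff (the function name is hypothetical):

/* Hypothetical sketch: gate or ungate the reference clock with the
 * RMW helpers defined in this header.
 */
static void example_ref_clk_ctrl(struct ufs_hisi_host *host, bool on)
{
	if (on)
		/* PHY_CLK_CTRL |= BIT_SYSCTRL_REF_CLOCK_EN */
		ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN,
				      PHY_CLK_CTRL);
	else
		/* PHY_CLK_CTRL &= ~BIT_SYSCTRL_REF_CLOCK_EN */
		ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN,
				      PHY_CLK_CTRL);
}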
diff --git a/drivers/scsi/ufs/ufs-hwmon.c b/drivers/scsi/ufs/ufs-hwmon.c
deleted file mode 100644
index 74855491dc8f..000000000000
--- a/drivers/scsi/ufs/ufs-hwmon.c
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * UFS hardware monitoring support
- * Copyright (c) 2021, Western Digital Corporation
- */
-
-#include <linux/hwmon.h>
-#include <linux/units.h>
-
-#include "ufshcd.h"
-
-struct ufs_hwmon_data {
- struct ufs_hba *hba;
- u8 mask;
-};
-
-static int ufs_read_temp_enable(struct ufs_hba *hba, u8 mask, long *val)
-{
- u32 ee_mask;
- int err;
-
- err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
- &ee_mask);
- if (err)
- return err;
-
- *val = (mask & ee_mask & MASK_EE_TOO_HIGH_TEMP) || (mask & ee_mask & MASK_EE_TOO_LOW_TEMP);
-
- return 0;
-}
-
-static int ufs_get_temp(struct ufs_hba *hba, enum attr_idn idn, long *val)
-{
- u32 value;
- int err;
-
- err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn, 0, 0, &value);
- if (err)
- return err;
-
- if (value == 0)
- return -ENODATA;
-
- *val = ((long)value - 80) * MILLIDEGREE_PER_DEGREE;
-
- return 0;
-}
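
As the conversion above implies, the raw temperature attributes report degrees Celsius with a +80 offset (so a raw 0 can be reserved to mean "no data"), while hwmon expects millidegrees. A worked example with an illustrative raw value:

/* A raw attribute value of 105 decodes to 105 - 80 = 25 degC and is
 * reported to hwmon as 25 * MILLIDEGREE_PER_DEGREE = 25000.
 */
long temp = ((long)105 - 80) * MILLIDEGREE_PER_DEGREE;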
-
-static int ufs_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
- long *val)
-{
- struct ufs_hwmon_data *data = dev_get_drvdata(dev);
- struct ufs_hba *hba = data->hba;
- int err;
-
- down(&hba->host_sem);
-
- if (!ufshcd_is_user_access_allowed(hba)) {
- up(&hba->host_sem);
- return -EBUSY;
- }
-
- ufshcd_rpm_get_sync(hba);
-
- switch (attr) {
- case hwmon_temp_enable:
- err = ufs_read_temp_enable(hba, data->mask, val);
-
- break;
- case hwmon_temp_crit:
- err = ufs_get_temp(hba, QUERY_ATTR_IDN_HIGH_TEMP_BOUND, val);
-
- break;
- case hwmon_temp_lcrit:
- err = ufs_get_temp(hba, QUERY_ATTR_IDN_LOW_TEMP_BOUND, val);
-
- break;
- case hwmon_temp_input:
- err = ufs_get_temp(hba, QUERY_ATTR_IDN_CASE_ROUGH_TEMP, val);
-
- break;
- default:
- err = -EOPNOTSUPP;
-
- break;
- }
-
- ufshcd_rpm_put_sync(hba);
-
- up(&hba->host_sem);
-
- return err;
-}
-
-static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
- long val)
-{
- struct ufs_hwmon_data *data = dev_get_drvdata(dev);
- struct ufs_hba *hba = data->hba;
- int err;
-
- if (attr != hwmon_temp_enable)
- return -EINVAL;
-
- if (val != 0 && val != 1)
- return -EINVAL;
-
- down(&hba->host_sem);
-
- if (!ufshcd_is_user_access_allowed(hba)) {
- up(&hba->host_sem);
- return -EBUSY;
- }
-
- ufshcd_rpm_get_sync(hba);
-
- if (val == 1)
- err = ufshcd_update_ee_usr_mask(hba, MASK_EE_URGENT_TEMP, 0);
- else
- err = ufshcd_update_ee_usr_mask(hba, 0, MASK_EE_URGENT_TEMP);
-
- ufshcd_rpm_put_sync(hba);
-
- up(&hba->host_sem);
-
- return err;
-}
-
-static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
- int channel)
-{
- if (type != hwmon_temp)
- return 0;
-
- switch (attr) {
- case hwmon_temp_enable:
- return 0644;
- case hwmon_temp_crit:
- case hwmon_temp_lcrit:
- case hwmon_temp_input:
- return 0444;
- default:
- break;
- }
- return 0;
-}
-
-static const struct hwmon_channel_info *ufs_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT),
- NULL
-};
-
-static const struct hwmon_ops ufs_hwmon_ops = {
- .is_visible = ufs_hwmon_is_visible,
- .read = ufs_hwmon_read,
- .write = ufs_hwmon_write,
-};
-
-static const struct hwmon_chip_info ufs_hwmon_hba_info = {
- .ops = &ufs_hwmon_ops,
- .info = ufs_hwmon_info,
-};
-
-void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask)
-{
- struct device *dev = hba->dev;
- struct ufs_hwmon_data *data;
- struct device *hwmon;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return;
-
- data->hba = hba;
- data->mask = mask;
-
- hwmon = hwmon_device_register_with_info(dev, "ufs", data, &ufs_hwmon_hba_info, NULL);
- if (IS_ERR(hwmon)) {
- dev_warn(dev, "Failed to instantiate hwmon device\n");
- kfree(data);
- return;
- }
-
- hba->hwmon_device = hwmon;
-}
-
-void ufs_hwmon_remove(struct ufs_hba *hba)
-{
- struct ufs_hwmon_data *data;
-
- if (!hba->hwmon_device)
- return;
-
- data = dev_get_drvdata(hba->hwmon_device);
- hwmon_device_unregister(hba->hwmon_device);
- hba->hwmon_device = NULL;
- kfree(data);
-}
-
-void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask)
-{
- if (!hba->hwmon_device)
- return;
-
- if (ee_mask & MASK_EE_TOO_HIGH_TEMP)
- hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_max_alarm, 0);
-
- if (ee_mask & MASK_EE_TOO_LOW_TEMP)
- hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_min_alarm, 0);
-}
diff --git a/drivers/scsi/ufs/ufs-mediatek-trace.h b/drivers/scsi/ufs/ufs-mediatek-trace.h
deleted file mode 100644
index 895e82ea6ece..000000000000
--- a/drivers/scsi/ufs/ufs-mediatek-trace.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020 MediaTek Inc.
- */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ufs_mtk
-
-#if !defined(_TRACE_EVENT_UFS_MEDIATEK_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EVENT_UFS_MEDIATEK_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(ufs_mtk_event,
- TP_PROTO(unsigned int type, unsigned int data),
- TP_ARGS(type, data),
-
- TP_STRUCT__entry(
- __field(unsigned int, type)
- __field(unsigned int, data)
- ),
-
- TP_fast_assign(
- __entry->type = type;
- __entry->data = data;
- ),
-
- TP_printk("ufs:event=%u data=%u",
- __entry->type, __entry->data)
- );
-#endif
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
-#define TRACE_INCLUDE_FILE ufs-mediatek-trace
-#include <trace/define_trace.h>
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
deleted file mode 100644
index 5393b5c9dd9c..000000000000
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ /dev/null
@@ -1,1253 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2019 MediaTek Inc.
- * Authors:
- * Stanley Chu <stanley.chu@mediatek.com>
- * Peter Wang <peter.wang@mediatek.com>
- */
-
-#include <linux/arm-smccc.h>
-#include <linux/bitfield.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
-#include <linux/sched/clock.h>
-#include <linux/soc/mediatek/mtk_sip_svc.h>
-
-#include "ufshcd.h"
-#include "ufshcd-crypto.h"
-#include "ufshcd-pltfrm.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
-#include "ufs-mediatek.h"
-
-#define CREATE_TRACE_POINTS
-#include "ufs-mediatek-trace.h"
-
-#define ufs_mtk_smc(cmd, val, res) \
- arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
- cmd, val, 0, 0, 0, 0, 0, &(res))
-
-#define ufs_mtk_va09_pwr_ctrl(res, on) \
- ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
-
-#define ufs_mtk_crypto_ctrl(res, enable) \
- ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
-
-#define ufs_mtk_ref_clk_notify(on, res) \
- ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
-
-#define ufs_mtk_device_reset_ctrl(high, res) \
- ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
-
-static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
- UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
- UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
- UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
- END_FIX
-};
-
-static const struct of_device_id ufs_mtk_of_match[] = {
- { .compatible = "mediatek,mt8183-ufshci" },
- {},
-};
-
-static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
-}
-
-static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
-}
-
-static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
-}
-
-static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
-{
- u32 tmp;
-
- if (enable) {
- ufshcd_dme_get(hba,
- UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
- tmp = tmp |
- (1 << RX_SYMBOL_CLK_GATE_EN) |
- (1 << SYS_CLK_GATE_EN) |
- (1 << TX_CLK_GATE_EN);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
-
- ufshcd_dme_get(hba,
- UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
- tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
- } else {
- ufshcd_dme_get(hba,
- UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
- tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
- (1 << SYS_CLK_GATE_EN) |
- (1 << TX_CLK_GATE_EN));
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
-
- ufshcd_dme_get(hba,
- UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
- tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
- ufshcd_dme_set(hba,
- UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
- }
-}
-
-static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
-{
- struct arm_smccc_res res;
-
- ufs_mtk_crypto_ctrl(res, 1);
- if (res.a0) {
- dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
- __func__, res.a0);
- hba->caps &= ~UFSHCD_CAP_CRYPTO;
- }
-}
-
-static void ufs_mtk_host_reset(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- reset_control_assert(host->hci_reset);
- reset_control_assert(host->crypto_reset);
- reset_control_assert(host->unipro_reset);
-
- usleep_range(100, 110);
-
- reset_control_deassert(host->unipro_reset);
- reset_control_deassert(host->crypto_reset);
- reset_control_deassert(host->hci_reset);
-}
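
Note that the deassert sequence above is the reverse of the assert sequence: UniPro is released from reset first and the HCI wrapper last, with the 100 us hold time sitting between the two halves.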
-
-static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
- struct reset_control **rc,
- char *str)
-{
- *rc = devm_reset_control_get(hba->dev, str);
- if (IS_ERR(*rc)) {
- dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
- str, PTR_ERR(*rc));
- *rc = NULL;
- }
-}
-
-static void ufs_mtk_init_reset(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- ufs_mtk_init_reset_control(hba, &host->hci_reset,
- "hci_rst");
- ufs_mtk_init_reset_control(hba, &host->unipro_reset,
- "unipro_rst");
- ufs_mtk_init_reset_control(hba, &host->crypto_reset,
- "crypto_rst");
-}
-
-static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- unsigned long flags;
-
- if (status == PRE_CHANGE) {
- if (host->unipro_lpm) {
- hba->vps->hba_enable_delay_us = 0;
- } else {
- hba->vps->hba_enable_delay_us = 600;
- ufs_mtk_host_reset(hba);
- }
-
- if (hba->caps & UFSHCD_CAP_CRYPTO)
- ufs_mtk_crypto_enable(hba);
-
- if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_writel(hba, 0,
- REG_AUTO_HIBERNATE_IDLE_TIMER);
- spin_unlock_irqrestore(hba->host->host_lock,
- flags);
-
- hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
- hba->ahit = 0;
- }
- }
-
- return 0;
-}
-
-static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct device *dev = hba->dev;
- struct device_node *np = dev->of_node;
- int err = 0;
-
- host->mphy = devm_of_phy_get_by_index(dev, np, 0);
-
- if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
- /*
- * The UFS driver might be probed before the phy driver is.
- * In that case, return -EPROBE_DEFER so that probing is retried.
- */
- err = -EPROBE_DEFER;
- dev_info(dev,
- "%s: required phy hasn't probed yet. err = %d\n",
- __func__, err);
- } else if (IS_ERR(host->mphy)) {
- err = PTR_ERR(host->mphy);
- if (err != -ENODEV) {
- dev_info(dev, "%s: PHY get failed %d\n", __func__,
- err);
- }
- }
-
- if (err)
- host->mphy = NULL;
- /*
- * Allow unbound mphy because not every platform needs specific
- * mphy control.
- */
- if (err == -ENODEV)
- err = 0;
-
- return err;
-}
-
-static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct arm_smccc_res res;
- ktime_t timeout, time_checked;
- u32 value;
-
- if (host->ref_clk_enabled == on)
- return 0;
-
- if (on) {
- ufs_mtk_ref_clk_notify(on, res);
- ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
- } else {
- ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
- ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
- }
-
- /* Wait for ack */
- timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
- do {
- time_checked = ktime_get();
- value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
-
- /* Wait until the ack bit equals the req bit */
- if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
- goto out;
-
- usleep_range(100, 200);
- } while (ktime_before(time_checked, timeout));
-
- dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
-
- ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
-
- return -ETIMEDOUT;
-
-out:
- host->ref_clk_enabled = on;
- if (on)
- ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
- else
- ufs_mtk_ref_clk_notify(on, res);
-
- return 0;
-}
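
The ack test above works because REFCLK_REQUEST is bit 0 and REFCLK_ACK is bit 1 of REG_UFS_REFCLK_CTRL (see the header later in this diff): shifting ACK right by one aligns it with REQUEST, so a single comparison confirms the handshake for both request (1 == 1) and release (0 == 0). Isolated as a predicate, purely for illustration:

/* True once the controller has acknowledged the current request state. */
static bool refclk_acked(u32 val)
{
	return ((val & REFCLK_ACK) >> 1) == (val & REFCLK_REQUEST);
}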
-
-static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
- u16 gating_us)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- if (hba->dev_info.clk_gating_wait_us) {
- host->ref_clk_gating_wait_us =
- hba->dev_info.clk_gating_wait_us;
- } else {
- host->ref_clk_gating_wait_us = gating_us;
- }
-
- host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
-}
-
-static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
- ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
- ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
- ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
- ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
- ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
- } else {
- ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
- }
-}
-
-static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
- unsigned long retry_ms)
-{
- u64 timeout, time_checked;
- u32 val, sm;
- bool wait_idle;
-
- /* cannot use plain ktime_get() in suspend */
- timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
-
- /* wait a specific time after check base */
- udelay(10);
- wait_idle = false;
-
- do {
- time_checked = ktime_get_mono_fast_ns();
- ufs_mtk_dbg_sel(hba);
- val = ufshcd_readl(hba, REG_UFS_PROBE);
-
- sm = val & 0x1f;
-
- /*
- * If the state machine is in H8-enter or H8-enter-confirm,
- * wait until it returns to the idle state.
- */
- if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
- wait_idle = true;
- udelay(50);
- continue;
- } else if (!wait_idle)
- break;
-
- if (wait_idle && (sm == VS_HCE_BASE))
- break;
- } while (time_checked < timeout);
-
- if (wait_idle && sm != VS_HCE_BASE)
- dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
-}
-
-static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
- unsigned long max_wait_ms)
-{
- ktime_t timeout, time_checked;
- u32 val;
-
- timeout = ktime_add_ms(ktime_get(), max_wait_ms);
- do {
- time_checked = ktime_get();
- ufs_mtk_dbg_sel(hba);
- val = ufshcd_readl(hba, REG_UFS_PROBE);
- val = val >> 28;
-
- if (val == state)
- return 0;
-
- /* Sleep for max. 200us */
- usleep_range(100, 200);
- } while (ktime_before(time_checked, timeout));
-
- if (val == state)
- return 0;
-
- return -ETIMEDOUT;
-}
-
-static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct phy *mphy = host->mphy;
- struct arm_smccc_res res;
- int ret = 0;
-
- if (!mphy || !(on ^ host->mphy_powered_on))
- return 0;
-
- if (on) {
- if (ufs_mtk_is_va09_supported(hba)) {
- ret = regulator_enable(host->reg_va09);
- if (ret < 0)
- goto out;
- /* wait 200 us to stabilize VA09 */
- usleep_range(200, 210);
- ufs_mtk_va09_pwr_ctrl(res, 1);
- }
- phy_power_on(mphy);
- } else {
- phy_power_off(mphy);
- if (ufs_mtk_is_va09_supported(hba)) {
- ufs_mtk_va09_pwr_ctrl(res, 0);
- ret = regulator_disable(host->reg_va09);
- if (ret < 0)
- goto out;
- }
- }
-out:
- if (ret) {
- dev_info(hba->dev,
- "failed to %s va09: %d\n",
- on ? "enable" : "disable",
- ret);
- } else {
- host->mphy_powered_on = on;
- }
-
- return ret;
-}
-
-static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
- struct clk **clk_out)
-{
- struct clk *clk;
- int err = 0;
-
- clk = devm_clk_get(dev, name);
- if (IS_ERR(clk))
- err = PTR_ERR(clk);
- else
- *clk_out = clk;
-
- return err;
-}
-
-static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct ufs_mtk_crypt_cfg *cfg;
- struct regulator *reg;
- int volt, ret;
-
- if (!ufs_mtk_is_boost_crypt_enabled(hba))
- return;
-
- cfg = host->crypt;
- volt = cfg->vcore_volt;
- reg = cfg->reg_vcore;
-
- ret = clk_prepare_enable(cfg->clk_crypt_mux);
- if (ret) {
- dev_info(hba->dev, "clk_prepare_enable(): %d\n",
- ret);
- return;
- }
-
- if (boost) {
- ret = regulator_set_voltage(reg, volt, INT_MAX);
- if (ret) {
- dev_info(hba->dev,
- "failed to set vcore to %d\n", volt);
- goto out;
- }
-
- ret = clk_set_parent(cfg->clk_crypt_mux,
- cfg->clk_crypt_perf);
- if (ret) {
- dev_info(hba->dev,
- "failed to set clk_crypt_perf\n");
- regulator_set_voltage(reg, 0, INT_MAX);
- goto out;
- }
- } else {
- ret = clk_set_parent(cfg->clk_crypt_mux,
- cfg->clk_crypt_lp);
- if (ret) {
- dev_info(hba->dev,
- "failed to set clk_crypt_lp\n");
- goto out;
- }
-
- ret = regulator_set_voltage(reg, 0, INT_MAX);
- if (ret) {
- dev_info(hba->dev,
- "failed to set vcore to MIN\n");
- }
- }
-out:
- clk_disable_unprepare(cfg->clk_crypt_mux);
-}
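
The ordering above preserves one invariant: the vcore floor is only released while the low-power clock parent is selected. regulator_set_voltage(reg, volt, INT_MAX) requests a minimum of volt; (reg, 0, INT_MAX) drops that request again. In sequence form:

/* boost:   raise the vcore floor, then mux to clk_crypt_perf
 * unboost: mux back to clk_crypt_lp, then release the vcore floor
 */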
-
-static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
- struct clk **clk)
-{
- int ret;
-
- ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
- if (ret) {
- dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
- name, ret);
- }
-
- return ret;
-}
-
-static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct ufs_mtk_crypt_cfg *cfg;
- struct device *dev = hba->dev;
- struct regulator *reg;
- u32 volt;
-
- host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
- GFP_KERNEL);
- if (!host->crypt)
- goto disable_caps;
-
- reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
- if (IS_ERR(reg)) {
- dev_info(dev, "failed to get dvfsrc-vcore: %ld",
- PTR_ERR(reg));
- goto disable_caps;
- }
-
- if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
- &volt)) {
- dev_info(dev, "failed to get boost-crypt-vcore-min");
- goto disable_caps;
- }
-
- cfg = host->crypt;
- if (ufs_mtk_init_host_clk(hba, "crypt_mux",
- &cfg->clk_crypt_mux))
- goto disable_caps;
-
- if (ufs_mtk_init_host_clk(hba, "crypt_lp",
- &cfg->clk_crypt_lp))
- goto disable_caps;
-
- if (ufs_mtk_init_host_clk(hba, "crypt_perf",
- &cfg->clk_crypt_perf))
- goto disable_caps;
-
- cfg->reg_vcore = reg;
- cfg->vcore_volt = volt;
- host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
-
-disable_caps:
- return;
-}
-
-static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- host->reg_va09 = regulator_get(hba->dev, "va09");
- if (!host->reg_va09)
- dev_info(hba->dev, "failed to get va09");
- else
- host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
-}
-
-static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct device_node *np = hba->dev->of_node;
-
- if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
- ufs_mtk_init_boost_crypt(hba);
-
- if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
- ufs_mtk_init_va09_pwr_ctrl(hba);
-
- if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
- host->caps |= UFS_MTK_CAP_DISABLE_AH8;
-
- if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
- host->caps |= UFS_MTK_CAP_BROKEN_VCC;
-
- dev_info(hba->dev, "caps: 0x%x", host->caps);
-}
-
-static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- ufs_mtk_boost_crypt(hba, up);
- ufs_mtk_setup_ref_clk(hba, up);
-
- if (up)
- phy_power_on(host->mphy);
- else
- phy_power_off(host->mphy);
-}
-
-/**
- * ufs_mtk_setup_clocks - enables/disable clocks
- * @hba: host controller instance
- * @on: If true, enable clocks else disable them.
- * @status: PRE_CHANGE or POST_CHANGE notify
- *
- * Returns 0 on success, non-zero on failure.
- */
-static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
- enum ufs_notify_change_status status)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- bool clk_pwr_off = false;
- int ret = 0;
-
- /*
- * If ufs_mtk_init() is not done yet, simply ignore this call.
- * ufs_mtk_setup_clocks() will be invoked again from
- * ufs_mtk_init() once initialization is done.
- */
- if (!host)
- return 0;
-
- if (!on && status == PRE_CHANGE) {
- if (ufshcd_is_link_off(hba)) {
- clk_pwr_off = true;
- } else if (ufshcd_is_link_hibern8(hba) ||
- (!ufshcd_can_hibern8_during_gating(hba) &&
- ufshcd_is_auto_hibern8_enabled(hba))) {
- /*
- * Gate ref-clk and poweroff mphy if link state is in
- * OFF or Hibern8 by either Auto-Hibern8 or
- * ufshcd_link_state_transition().
- */
- ret = ufs_mtk_wait_link_state(hba,
- VS_LINK_HIBERN8,
- 15);
- if (!ret)
- clk_pwr_off = true;
- }
-
- if (clk_pwr_off)
- ufs_mtk_scale_perf(hba, false);
- } else if (on && status == POST_CHANGE) {
- ufs_mtk_scale_perf(hba, true);
- }
-
- return ret;
-}
-
-static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- int ret, ver = 0;
-
- if (host->hw_ver.major)
- return;
-
- /* Set default (minimum) version anyway */
- host->hw_ver.major = 2;
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
- if (!ret) {
- if (ver >= UFS_UNIPRO_VER_1_8) {
- host->hw_ver.major = 3;
- /*
- * Fix HCI version for some platforms with
- * incorrect version
- */
- if (hba->ufs_version < ufshci_version(3, 0))
- hba->ufs_version = ufshci_version(3, 0);
- }
- }
-}
-
-static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
-{
- return hba->ufs_version;
-}
-
-/**
- * ufs_mtk_init - find other essential mmio bases
- * @hba: host controller instance
- *
- * Binds PHY with controller and powers up PHY enabling clocks
- * and regulators.
- *
- * Returns -EPROBE_DEFER if binding fails, returns negative error
- * on phy power up failure and returns zero on success.
- */
-static int ufs_mtk_init(struct ufs_hba *hba)
-{
- const struct of_device_id *id;
- struct device *dev = hba->dev;
- struct ufs_mtk_host *host;
- int err = 0;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- err = -ENOMEM;
- dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
- goto out;
- }
-
- host->hba = hba;
- ufshcd_set_variant(hba, host);
-
- id = of_match_device(ufs_mtk_of_match, dev);
- if (!id) {
- err = -EINVAL;
- goto out;
- }
-
- /* Initialize host capability */
- ufs_mtk_init_host_caps(hba);
-
- err = ufs_mtk_bind_mphy(hba);
- if (err)
- goto out_variant_clear;
-
- ufs_mtk_init_reset(hba);
-
- /* Enable runtime autosuspend */
- hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
-
- /* Enable clock-gating */
- hba->caps |= UFSHCD_CAP_CLK_GATING;
-
- /* Enable inline encryption */
- hba->caps |= UFSHCD_CAP_CRYPTO;
-
- /* Enable WriteBooster */
- hba->caps |= UFSHCD_CAP_WB_EN;
- hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
- hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
-
- if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
- hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
-
- /*
- * ufshcd_vops_init() is invoked after
- * ufshcd_setup_clock(true) in ufshcd_hba_init(), so the
- * phy clock setup there is skipped.
- *
- * Enable phy clocks specifically here.
- */
- ufs_mtk_mphy_power_on(hba, true);
- ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
-
- host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
-
- goto out;
-
-out_variant_clear:
- ufshcd_set_variant(hba, NULL);
-out:
- return err;
-}
-
-static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct ufs_dev_params host_cap;
- int ret;
-
- ufshcd_init_pwr_dev_param(&host_cap);
- host_cap.hs_rx_gear = UFS_HS_G4;
- host_cap.hs_tx_gear = UFS_HS_G4;
-
- ret = ufshcd_get_pwr_dev_param(&host_cap,
- dev_max_params,
- dev_req_params);
- if (ret) {
- pr_info("%s: failed to determine capabilities\n",
- __func__);
- }
-
- if (host->hw_ver.major >= 3) {
- ret = ufshcd_dme_configure_adapt(hba,
- dev_req_params->gear_tx,
- PA_INITIAL_ADAPT);
- }
-
- return ret;
-}
-
-static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status stage,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- int ret = 0;
-
- switch (stage) {
- case PRE_CHANGE:
- ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
- dev_req_params);
- break;
- case POST_CHANGE:
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
-{
- int ret;
- struct ufs_mtk_host *host = ufshcd_get_variant(hba);
-
- ret = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
- lpm ? 1 : 0);
- if (!ret || !lpm) {
- /*
- * If the UIC command failed, forcibly remain in non-LPM mode
- * so that the default hba_enable_delay_us value is used when
- * re-enabling the host.
- */
- host->unipro_lpm = lpm;
- }
-
- return ret;
-}
-
-static int ufs_mtk_pre_link(struct ufs_hba *hba)
-{
- int ret;
- u32 tmp;
-
- ufs_mtk_get_controller_version(hba);
-
- ret = ufs_mtk_unipro_set_lpm(hba, false);
- if (ret)
- return ret;
-
- /*
- * Setting PA_Local_TX_LCC_Enable to 0 before link startup
- * to make sure that both host and device TX LCC are disabled
- * once link startup is completed.
- */
- ret = ufshcd_disable_host_tx_lcc(hba);
- if (ret)
- return ret;
-
- /* disable deep stall */
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
- if (ret)
- return ret;
-
- tmp &= ~(1 << 6);
-
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
-
- return ret;
-}
-
-static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
-{
- unsigned long flags;
- u32 ah_ms;
-
- if (ufshcd_is_clkgating_allowed(hba)) {
- if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
- ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
- hba->ahit);
- else
- ah_ms = 10;
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.delay_ms = ah_ms + 5;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-}
-
-static int ufs_mtk_post_link(struct ufs_hba *hba)
-{
- /* enable unipro clock gating feature */
- ufs_mtk_cfg_unipro_cg(hba, true);
-
- /* will be configured during hba probe */
- if (ufshcd_is_auto_hibern8_supported(hba))
- hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
-
- ufs_mtk_setup_clk_gating(hba);
-
- return 0;
-}
-
-static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status stage)
-{
- int ret = 0;
-
- switch (stage) {
- case PRE_CHANGE:
- ret = ufs_mtk_pre_link(hba);
- break;
- case POST_CHANGE:
- ret = ufs_mtk_post_link(hba);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int ufs_mtk_device_reset(struct ufs_hba *hba)
-{
- struct arm_smccc_res res;
-
- /* disable hba before device reset */
- ufshcd_hba_stop(hba);
-
- ufs_mtk_device_reset_ctrl(0, res);
-
- /*
- * The reset signal is active low. UFS devices shall detect a
- * positive or negative RST_n pulse width of at least 1 us.
- *
- * To be on the safe side, keep the reset low for at least 10 us.
- */
- usleep_range(10, 15);
-
- ufs_mtk_device_reset_ctrl(1, res);
-
- /* Some devices may need time to respond to rst_n */
- usleep_range(10000, 15000);
-
- dev_info(hba->dev, "device reset done\n");
-
- return 0;
-}
-
-static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
-{
- int err;
-
- err = ufshcd_hba_enable(hba);
- if (err)
- return err;
-
- err = ufs_mtk_unipro_set_lpm(hba, false);
- if (err)
- return err;
-
- err = ufshcd_uic_hibern8_exit(hba);
- if (!err)
- ufshcd_set_link_active(hba);
- else
- return err;
-
- err = ufshcd_make_hba_operational(hba);
- if (err)
- return err;
-
- return 0;
-}
-
-static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
-{
- int err;
-
- err = ufs_mtk_unipro_set_lpm(hba, true);
- if (err) {
- /* Resume UniPro state for following error recovery */
- ufs_mtk_unipro_set_lpm(hba, false);
- return err;
- }
-
- return 0;
-}
-
-static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
-{
- if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
- return;
-
- if (lpm && !hba->vreg_info.vcc->enabled)
- regulator_set_mode(hba->vreg_info.vccq2->reg,
- REGULATOR_MODE_IDLE);
- else if (!lpm)
- regulator_set_mode(hba->vreg_info.vccq2->reg,
- REGULATOR_MODE_NORMAL);
-}
-
-static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
-{
- unsigned long flags;
- int ret;
-
- /* disable auto-hibern8 */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* wait for the host to return to idle after auto-hibern8 is disabled */
- ufs_mtk_wait_idle_state(hba, 5);
-
- ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
- if (ret)
- dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
-}
-
-static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
- enum ufs_notify_change_status status)
-{
- int err;
- struct arm_smccc_res res;
-
- if (status == PRE_CHANGE) {
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return 0;
- ufs_mtk_auto_hibern8_disable(hba);
- return 0;
- }
-
- if (ufshcd_is_link_hibern8(hba)) {
- err = ufs_mtk_link_set_lpm(hba);
- if (err)
- goto fail;
- }
-
- if (!ufshcd_is_link_active(hba)) {
- /*
- * Make sure no error is returned here, to prevent
- * ufshcd_suspend() from re-enabling the regulators while
- * vreg is still in low-power mode.
- */
- ufs_mtk_vreg_set_lpm(hba, true);
- err = ufs_mtk_mphy_power_on(hba, false);
- if (err)
- goto fail;
- }
-
- if (ufshcd_is_link_off(hba))
- ufs_mtk_device_reset_ctrl(0, res);
-
- return 0;
-fail:
- /*
- * Forcibly set the link to the OFF state to trigger
- * ufshcd_host_reset_and_restore() in ufshcd_suspend(),
- * so that a complete host reset is performed.
- */
- ufshcd_set_link_off(hba);
- return -EAGAIN;
-}
-
-static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- int err;
-
- err = ufs_mtk_mphy_power_on(hba, true);
- if (err)
- goto fail;
-
- ufs_mtk_vreg_set_lpm(hba, false);
-
- if (ufshcd_is_link_hibern8(hba)) {
- err = ufs_mtk_link_set_hpm(hba);
- if (err)
- goto fail;
- }
-
- return 0;
-fail:
- return ufshcd_link_recovery(hba);
-}
-
-static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
-
- ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
-
- ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
- REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
- "MPHY Ctrl ");
-
- /* Direct debugging information to REG_MTK_PROBE */
- ufs_mtk_dbg_sel(hba);
- ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
-}
-
-static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
- u16 mid = dev_info->wmanufacturerid;
-
- if (mid == UFS_VENDOR_SAMSUNG)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
-
- /*
- * Decide waiting time before gating reference clock and
- * after ungating reference clock according to vendors'
- * requirements.
- */
- if (mid == UFS_VENDOR_SAMSUNG)
- ufs_mtk_setup_ref_clk_wait_us(hba, 1);
- else if (mid == UFS_VENDOR_SKHYNIX)
- ufs_mtk_setup_ref_clk_wait_us(hba, 30);
- else if (mid == UFS_VENDOR_TOSHIBA)
- ufs_mtk_setup_ref_clk_wait_us(hba, 100);
- else
- ufs_mtk_setup_ref_clk_wait_us(hba,
- REFCLK_DEFAULT_WAIT_US);
-
- return 0;
-}
-
-static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
-{
- ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
-
- if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
- (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
- hba->vreg_info.vcc->always_on = true;
- /*
- * VCC will be kept always-on, so no delay is needed
- * around the regulator operations.
- */
- hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
- UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
- }
-}
-
-static void ufs_mtk_event_notify(struct ufs_hba *hba,
- enum ufs_event_type evt, void *data)
-{
- unsigned int val = *(u32 *)data;
-
- trace_ufs_mtk_event(evt, val);
-}
-
-/*
- * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
- *
- * The variant operations configure the necessary controller and PHY
- * handshake during initialization.
- */
-static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
- .name = "mediatek.ufshci",
- .init = ufs_mtk_init,
- .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
- .setup_clocks = ufs_mtk_setup_clocks,
- .hce_enable_notify = ufs_mtk_hce_enable_notify,
- .link_startup_notify = ufs_mtk_link_startup_notify,
- .pwr_change_notify = ufs_mtk_pwr_change_notify,
- .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
- .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
- .suspend = ufs_mtk_suspend,
- .resume = ufs_mtk_resume,
- .dbg_register_dump = ufs_mtk_dbg_register_dump,
- .device_reset = ufs_mtk_device_reset,
- .event_notify = ufs_mtk_event_notify,
-};
-
-/**
- * ufs_mtk_probe - probe routine of the driver
- * @pdev: pointer to Platform device handle
- *
- * Return zero for success and non-zero for failure
- */
-static int ufs_mtk_probe(struct platform_device *pdev)
-{
- int err;
- struct device *dev = &pdev->dev;
- struct device_node *reset_node;
- struct platform_device *reset_pdev;
- struct device_link *link;
-
- reset_node = of_find_compatible_node(NULL, NULL,
- "ti,syscon-reset");
- if (!reset_node) {
- dev_notice(dev, "find ti,syscon-reset fail\n");
- goto skip_reset;
- }
- reset_pdev = of_find_device_by_node(reset_node);
- if (!reset_pdev) {
- dev_notice(dev, "find reset_pdev fail\n");
- goto skip_reset;
- }
- link = device_link_add(dev, &reset_pdev->dev,
- DL_FLAG_AUTOPROBE_CONSUMER);
- put_device(&reset_pdev->dev);
- if (!link) {
- dev_notice(dev, "add reset device_link fail\n");
- goto skip_reset;
- }
- /* supplier is not probed */
- if (link->status == DL_STATE_DORMANT) {
- err = -EPROBE_DEFER;
- goto out;
- }
-
-skip_reset:
- /* perform generic probe */
- err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
-
-out:
- if (err)
- dev_info(dev, "probe failed %d\n", err);
-
- of_node_put(reset_node);
- return err;
-}
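
The probe above links the UFS host, as a consumer, to its external reset controller. DL_FLAG_AUTOPROBE_CONSUMER asks the driver core to probe the consumer once the supplier is bound, and a link still in DL_STATE_DORMANT means the supplier has not probed yet, hence -EPROBE_DEFER. Stripped to its generic shape (names as in ufs_mtk_probe()):

link = device_link_add(dev, &reset_pdev->dev, DL_FLAG_AUTOPROBE_CONSUMER);
if (link && link->status == DL_STATE_DORMANT)
	return -EPROBE_DEFER;	/* supplier not bound yet; retry later */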
-
-/**
- * ufs_mtk_remove - set driver_data of the device to NULL
- * @pdev: pointer to platform device handle
- *
- * Always return 0
- */
-static int ufs_mtk_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
- return 0;
-}
-
-static const struct dev_pm_ops ufs_mtk_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
- .prepare = ufshcd_suspend_prepare,
- .complete = ufshcd_resume_complete,
-};
-
-static struct platform_driver ufs_mtk_pltform = {
- .probe = ufs_mtk_probe,
- .remove = ufs_mtk_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "ufshcd-mtk",
- .pm = &ufs_mtk_pm_ops,
- .of_match_table = ufs_mtk_of_match,
- },
-};
-
-MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
-MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
-MODULE_DESCRIPTION("MediaTek UFS Host Driver");
-MODULE_LICENSE("GPL v2");
-
-module_platform_driver(ufs_mtk_pltform);
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
deleted file mode 100644
index 414dca86c09f..000000000000
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2019 MediaTek Inc.
- */
-
-#ifndef _UFS_MEDIATEK_H
-#define _UFS_MEDIATEK_H
-
-#include <linux/bitops.h>
-#include <linux/soc/mediatek/mtk_sip_svc.h>
-
-/*
- * Vendor specific UFSHCI Registers
- */
-#define REG_UFS_REFCLK_CTRL 0x144
-#define REG_UFS_EXTREG 0x2100
-#define REG_UFS_MPHYCTRL 0x2200
-#define REG_UFS_MTK_IP_VER 0x2240
-#define REG_UFS_REJECT_MON 0x22AC
-#define REG_UFS_DEBUG_SEL 0x22C0
-#define REG_UFS_PROBE 0x22C8
-#define REG_UFS_DEBUG_SEL_B0 0x22D0
-#define REG_UFS_DEBUG_SEL_B1 0x22D4
-#define REG_UFS_DEBUG_SEL_B2 0x22D8
-#define REG_UFS_DEBUG_SEL_B3 0x22DC
-
-/*
- * Ref-clk control
- *
- * Values for register REG_UFS_REFCLK_CTRL
- */
-#define REFCLK_RELEASE 0x0
-#define REFCLK_REQUEST BIT(0)
-#define REFCLK_ACK BIT(1)
-
-#define REFCLK_REQ_TIMEOUT_US 3000
-#define REFCLK_DEFAULT_WAIT_US 32
-
-/*
- * Other attributes
- */
-#define VS_DEBUGCLOCKENABLE 0xD0A1
-#define VS_SAVEPOWERCONTROL 0xD0A6
-#define VS_UNIPROPOWERDOWNCONTROL 0xD0A8
-
-/*
- * Vendor specific link state
- */
-enum {
- VS_LINK_DISABLED = 0,
- VS_LINK_DOWN = 1,
- VS_LINK_UP = 2,
- VS_LINK_HIBERN8 = 3,
- VS_LINK_LOST = 4,
- VS_LINK_CFG = 5,
-};
-
-/*
- * Vendor specific host controller state
- */
-enum {
- VS_HCE_RESET = 0,
- VS_HCE_BASE = 1,
- VS_HCE_OOCPR_WAIT = 2,
- VS_HCE_DME_RESET = 3,
- VS_HCE_MIDDLE = 4,
- VS_HCE_DME_ENABLE = 5,
- VS_HCE_DEFAULTS = 6,
- VS_HIB_IDLEEN = 7,
- VS_HIB_ENTER = 8,
- VS_HIB_ENTER_CONF = 9,
- VS_HIB_MIDDLE = 10,
- VS_HIB_WAITTIMER = 11,
- VS_HIB_EXIT_CONF = 12,
- VS_HIB_EXIT = 13,
-};
-
-/*
- * SiP commands
- */
-#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276)
-#define UFS_MTK_SIP_VA09_PWR_CTRL BIT(0)
-#define UFS_MTK_SIP_DEVICE_RESET BIT(1)
-#define UFS_MTK_SIP_CRYPTO_CTRL BIT(2)
-#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3)
-
-/*
- * VS_DEBUGCLOCKENABLE
- */
-enum {
- TX_SYMBOL_CLK_REQ_FORCE = 5,
-};
-
-/*
- * VS_SAVEPOWERCONTROL
- */
-enum {
- RX_SYMBOL_CLK_GATE_EN = 0,
- SYS_CLK_GATE_EN = 2,
- TX_CLK_GATE_EN = 3,
-};
-
-/*
- * Host capability
- */
-enum ufs_mtk_host_caps {
- UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0,
- UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
- UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
- UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
-};
-
-struct ufs_mtk_crypt_cfg {
- struct regulator *reg_vcore;
- struct clk *clk_crypt_perf;
- struct clk *clk_crypt_mux;
- struct clk *clk_crypt_lp;
- int vcore_volt;
-};
-
-struct ufs_mtk_hw_ver {
- u8 step;
- u8 minor;
- u8 major;
-};
-
-struct ufs_mtk_host {
- struct phy *mphy;
- struct regulator *reg_va09;
- struct reset_control *hci_reset;
- struct reset_control *unipro_reset;
- struct reset_control *crypto_reset;
- struct ufs_hba *hba;
- struct ufs_mtk_crypt_cfg *crypt;
- struct ufs_mtk_hw_ver hw_ver;
- enum ufs_mtk_host_caps caps;
- bool mphy_powered_on;
- bool unipro_lpm;
- bool ref_clk_enabled;
- u16 ref_clk_ungating_wait_us;
- u16 ref_clk_gating_wait_us;
- u32 ip_ver;
-};
-
-#endif /* !_UFS_MEDIATEK_H */
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
deleted file mode 100644
index bbb0ad7590ec..000000000000
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ /dev/null
@@ -1,245 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Qualcomm ICE (Inline Crypto Engine) support.
- *
- * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
- * Copyright 2019 Google LLC
- */
-
-#include <linux/platform_device.h>
-#include <linux/qcom_scm.h>
-
-#include "ufshcd-crypto.h"
-#include "ufs-qcom.h"
-
-#define AES_256_XTS_KEY_SIZE 64
-
-/* QCOM ICE registers */
-
-#define QCOM_ICE_REG_CONTROL 0x0000
-#define QCOM_ICE_REG_RESET 0x0004
-#define QCOM_ICE_REG_VERSION 0x0008
-#define QCOM_ICE_REG_FUSE_SETTING 0x0010
-#define QCOM_ICE_REG_PARAMETERS_1 0x0014
-#define QCOM_ICE_REG_PARAMETERS_2 0x0018
-#define QCOM_ICE_REG_PARAMETERS_3 0x001C
-#define QCOM_ICE_REG_PARAMETERS_4 0x0020
-#define QCOM_ICE_REG_PARAMETERS_5 0x0024
-
-/* QCOM ICE v3.X only */
-#define QCOM_ICE_GENERAL_ERR_STTS 0x0040
-#define QCOM_ICE_INVALID_CCFG_ERR_STTS 0x0030
-#define QCOM_ICE_GENERAL_ERR_MASK 0x0044
-
-/* QCOM ICE v2.X only */
-#define QCOM_ICE_REG_NON_SEC_IRQ_STTS 0x0040
-#define QCOM_ICE_REG_NON_SEC_IRQ_MASK 0x0044
-
-#define QCOM_ICE_REG_NON_SEC_IRQ_CLR 0x0048
-#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME1 0x0050
-#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME2 0x0054
-#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME1 0x0058
-#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME2 0x005C
-#define QCOM_ICE_REG_STREAM1_BIST_ERROR_VEC 0x0060
-#define QCOM_ICE_REG_STREAM2_BIST_ERROR_VEC 0x0064
-#define QCOM_ICE_REG_STREAM1_BIST_FINISH_VEC 0x0068
-#define QCOM_ICE_REG_STREAM2_BIST_FINISH_VEC 0x006C
-#define QCOM_ICE_REG_BIST_STATUS 0x0070
-#define QCOM_ICE_REG_BYPASS_STATUS 0x0074
-#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
-#define QCOM_ICE_REG_ENDIAN_SWAP 0x1004
-#define QCOM_ICE_REG_TEST_BUS_CONTROL 0x1010
-#define QCOM_ICE_REG_TEST_BUS_REG 0x1014
-
-/* BIST ("built-in self-test"?) status flags */
-#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000
-
-#define QCOM_ICE_FUSE_SETTING_MASK 0x1
-#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
-#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
-
-#define qcom_ice_writel(host, val, reg) \
- writel((val), (host)->ice_mmio + (reg))
-#define qcom_ice_readl(host, reg) \
- readl((host)->ice_mmio + (reg))
-
-static bool qcom_ice_supported(struct ufs_qcom_host *host)
-{
- struct device *dev = host->hba->dev;
- u32 regval = qcom_ice_readl(host, QCOM_ICE_REG_VERSION);
- int major = regval >> 24;
- int minor = (regval >> 16) & 0xFF;
- int step = regval & 0xFFFF;
-
- /* For now this driver only supports ICE version 3. */
- if (major != 3) {
- dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
- major, minor, step);
- return false;
- }
-
- dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
- major, minor, step);
-
- /* If fuses are blown, ICE might not work in the standard way. */
- regval = qcom_ice_readl(host, QCOM_ICE_REG_FUSE_SETTING);
- if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
- QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
- QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
- dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
- return false;
- }
- return true;
-}
-
-int ufs_qcom_ice_init(struct ufs_qcom_host *host)
-{
- struct ufs_hba *hba = host->hba;
- struct device *dev = hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
- int err;
-
- if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) &
- MASK_CRYPTO_SUPPORT))
- return 0;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice");
- if (!res) {
- dev_warn(dev, "ICE registers not found\n");
- goto disable;
- }
-
- if (!qcom_scm_ice_available()) {
- dev_warn(dev, "ICE SCM interface not found\n");
- goto disable;
- }
-
- host->ice_mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(host->ice_mmio)) {
- err = PTR_ERR(host->ice_mmio);
- dev_err(dev, "Failed to map ICE registers; err=%d\n", err);
- return err;
- }
-
- if (!qcom_ice_supported(host))
- goto disable;
-
- return 0;
-
-disable:
- dev_warn(dev, "Disabling inline encryption support\n");
- hba->caps &= ~UFSHCD_CAP_CRYPTO;
- return 0;
-}
-
-static void qcom_ice_low_power_mode_enable(struct ufs_qcom_host *host)
-{
- u32 regval;
-
- regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL);
- /*
- * Enable low power mode sequence
- * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
- */
- regval |= 0x7000;
- qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
-}
-
-static void qcom_ice_optimization_enable(struct ufs_qcom_host *host)
-{
- u32 regval;
-
- /* ICE Optimizations Enable Sequence */
- regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL);
- regval |= 0xD807100;
- /* ICE HPG requires delay before writing */
- udelay(5);
- qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
- udelay(5);
-}
-
-int ufs_qcom_ice_enable(struct ufs_qcom_host *host)
-{
- if (!(host->hba->caps & UFSHCD_CAP_CRYPTO))
- return 0;
- qcom_ice_low_power_mode_enable(host);
- qcom_ice_optimization_enable(host);
- return ufs_qcom_ice_resume(host);
-}
-
-/* Poll until all BIST bits are reset */
-static int qcom_ice_wait_bist_status(struct ufs_qcom_host *host)
-{
- int count;
- u32 reg;
-
- for (count = 0; count < 100; count++) {
- reg = qcom_ice_readl(host, QCOM_ICE_REG_BIST_STATUS);
- if (!(reg & QCOM_ICE_BIST_STATUS_MASK))
- break;
- udelay(50);
- }
- if (reg)
- return -ETIMEDOUT;
- return 0;
-}
-
-int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
-{
- int err;
-
- if (!(host->hba->caps & UFSHCD_CAP_CRYPTO))
- return 0;
-
- err = qcom_ice_wait_bist_status(host);
- if (err) {
- dev_err(host->hba->dev, "BIST status error (%d)\n", err);
- return err;
- }
- return 0;
-}
-
-/*
- * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires
- * vendor-specific SCM calls for this; it doesn't support the standard way.
- */
-int ufs_qcom_ice_program_key(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot)
-{
- union ufs_crypto_cap_entry cap;
- union {
- u8 bytes[AES_256_XTS_KEY_SIZE];
- u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
- } key;
- int i;
- int err;
-
- if (!(cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE))
- return qcom_scm_ice_invalidate_key(slot);
-
- /* Only AES-256-XTS has been tested so far. */
- cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
- if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
- cap.key_size != UFS_CRYPTO_KEY_SIZE_256) {
- dev_err_ratelimited(hba->dev,
- "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
- cap.algorithm_id, cap.key_size);
- return -EINVAL;
- }
-
- memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);
-
- /*
- * The SCM call byte-swaps the 32-bit words of the key, so we have
- * to do the same in order for the final key to be correct.
- */
- for (i = 0; i < ARRAY_SIZE(key.words); i++)
- __cpu_to_be32s(&key.words[i]);
-
- err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
- QCOM_SCM_ICE_CIPHER_AES_256_XTS,
- cfg->data_unit_size);
- memzero_explicit(&key, sizeof(key));
- return err;
-}
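
__cpu_to_be32s() converts each 32-bit word to big-endian in place: a byte swap on little-endian CPUs, a no-op on big-endian ones. This pre-compensates for the swap the SCM call applies. A small worked example:

/* On a little-endian CPU, w = 0x11223344 is stored as the bytes
 * 44 33 22 11; after __cpu_to_be32s(&w) the stored bytes are
 * 11 22 33 44, i.e. w reads back as 0x44332211.
 */
u32 w = 0x11223344;
__cpu_to_be32s(&w);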
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
deleted file mode 100644
index 0d2e950d0865..000000000000
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ /dev/null
@@ -1,1581 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
- */
-
-#include <linux/acpi.h>
-#include <linux/time.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
-#include <linux/gpio/consumer.h>
-#include <linux/reset-controller.h>
-#include <linux/devfreq.h>
-
-#include "ufshcd.h"
-#include "ufshcd-pltfrm.h"
-#include "unipro.h"
-#include "ufs-qcom.h"
-#include "ufshci.h"
-#include "ufs_quirks.h"
-#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
-
-enum {
- TSTBUS_UAWM,
- TSTBUS_UARM,
- TSTBUS_TXUC,
- TSTBUS_RXUC,
- TSTBUS_DFC,
- TSTBUS_TRLUT,
- TSTBUS_TMRLUT,
- TSTBUS_OCSC,
- TSTBUS_UTP_HCI,
- TSTBUS_COMBINED,
- TSTBUS_WRAPPER,
- TSTBUS_UNIPRO,
- TSTBUS_MAX,
-};
-
-static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-
-static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles);
-
-static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
-{
- return container_of(rcd, struct ufs_qcom_host, rcdev);
-}
-
-static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
- const char *prefix, void *priv)
-{
- ufshcd_dump_regs(hba, offset, len * 4, prefix);
-}
-
-static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
-{
- int err = 0;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
- __func__, err);
-
- return err;
-}
-
-static int ufs_qcom_host_clk_get(struct device *dev,
- const char *name, struct clk **clk_out, bool optional)
-{
- struct clk *clk;
- int err = 0;
-
- clk = devm_clk_get(dev, name);
- if (!IS_ERR(clk)) {
- *clk_out = clk;
- return 0;
- }
-
- err = PTR_ERR(clk);
-
- if (optional && err == -ENOENT) {
- *clk_out = NULL;
- return 0;
- }
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get %s err %d\n", name, err);
-
- return err;
-}
-
-static int ufs_qcom_host_clk_enable(struct device *dev,
- const char *name, struct clk *clk)
-{
- int err = 0;
-
- err = clk_prepare_enable(clk);
- if (err)
- dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
-
- return err;
-}
-
-static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
-{
- if (!host->is_lane_clks_enabled)
- return;
-
- clk_disable_unprepare(host->tx_l1_sync_clk);
- clk_disable_unprepare(host->tx_l0_sync_clk);
- clk_disable_unprepare(host->rx_l1_sync_clk);
- clk_disable_unprepare(host->rx_l0_sync_clk);
-
- host->is_lane_clks_enabled = false;
-}
-
-static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
-{
- int err = 0;
- struct device *dev = host->hba->dev;
-
- if (host->is_lane_clks_enabled)
- return 0;
-
- err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
- host->rx_l0_sync_clk);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
- host->tx_l0_sync_clk);
- if (err)
- goto disable_rx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
- host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
- host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
-
- host->is_lane_clks_enabled = true;
- goto out;
-
-disable_rx_l1:
- clk_disable_unprepare(host->rx_l1_sync_clk);
-disable_tx_l0:
- clk_disable_unprepare(host->tx_l0_sync_clk);
-disable_rx_l0:
- clk_disable_unprepare(host->rx_l0_sync_clk);
-out:
- return err;
-}
-
-static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
-{
- int err = 0;
- struct device *dev = host->hba->dev;
-
- if (has_acpi_companion(dev))
- return 0;
-
- err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
- &host->rx_l0_sync_clk, false);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
- &host->tx_l0_sync_clk, false);
- if (err)
- goto out;
-
- /* In case of single lane per direction, don't read lane1 clocks */
- if (host->hba->lanes_per_direction > 1) {
- err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk, false);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk, true);
- }
-out:
- return err;
-}
-
-static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
-{
- u32 tx_lanes;
-
- return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
-}
-
-static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
-{
- int err;
- u32 tx_fsm_val = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
-
- do {
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &tx_fsm_val);
- if (err || tx_fsm_val == TX_FSM_HIBERN8)
- break;
-
- /* sleep for max. 200us */
- usleep_range(100, 200);
- } while (time_before(jiffies, timeout));
-
- /*
- * We might have been scheduled out for a long time while polling,
- * so check the state one more time.
- */
- if (time_after(jiffies, timeout))
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &tx_fsm_val);
-
- if (err) {
- dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
- __func__, err);
- } else if (tx_fsm_val != TX_FSM_HIBERN8) {
- err = tx_fsm_val;
- dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
- __func__, err);
- }
-
- return err;
-}
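-
-/*
- * Note that when the poll above ends in a state other than TX_FSM_HIBERN8,
- * the raw TX_FSM state value is returned in place of a negative errno; the
- * caller in ufs_qcom_hce_enable_notify() simply treats any non-zero return
- * as a failure.
- */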
-
-static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
-{
- ufshcd_rmwl(host->hba, QUNIPRO_SEL,
- ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
- REG_UFS_CFG1);
- /* make sure above configuration is applied before we return */
- mb();
-}
-
-/*
- * ufs_qcom_host_reset - reset host controller and PHY
- */
-static int ufs_qcom_host_reset(struct ufs_hba *hba)
-{
- int ret = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- bool reenable_intr = false;
-
- if (!host->core_reset) {
- dev_warn(hba->dev, "%s: reset control not set\n", __func__);
- goto out;
- }
-
- reenable_intr = hba->is_irq_enabled;
- disable_irq(hba->irq);
- hba->is_irq_enabled = false;
-
- ret = reset_control_assert(host->core_reset);
- if (ret) {
- dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
- __func__, ret);
- goto out;
- }
-
- /*
- * The hardware requires a delay of at least 3-4 sleep clock
- * (32.768 kHz) cycles between assert and deassert, which comes to
- * roughly 122us (4/32768 s). To be on the safe side, add a 200us delay.
- */
- usleep_range(200, 210);
-
- ret = reset_control_deassert(host->core_reset);
- if (ret)
- dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
- __func__, ret);
-
- usleep_range(1000, 1100);
-
- if (reenable_intr) {
- enable_irq(hba->irq);
- hba->is_irq_enabled = true;
- }
-
-out:
- return ret;
-}
-
-static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int ret = 0;
- bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
-
- /* Reset UFS Host Controller and PHY */
- ret = ufs_qcom_host_reset(hba);
- if (ret)
- dev_warn(hba->dev, "%s: host reset returned %d\n",
- __func__, ret);
-
- if (is_rate_B)
- phy_set_mode(phy, PHY_MODE_UFS_HS_B);
-
- /* phy initialization - calibrate the phy */
- ret = phy_init(phy);
- if (ret) {
- dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
- __func__, ret);
- goto out;
- }
-
- /* power on phy - start serdes and phy's power and clocks */
- ret = phy_power_on(phy);
- if (ret) {
- dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
- __func__, ret);
- goto out_disable_phy;
- }
-
- ufs_qcom_select_unipro_mode(host);
-
- return 0;
-
-out_disable_phy:
- phy_exit(phy);
-out:
- return ret;
-}
-
-/*
- * The UTP controller has a number of internal clock gating cells (CGCs).
- * Internal hardware sub-modules within the UTP controller control the CGCs.
- * Hardware CGCs disable the clock to UTP sub-modules that are not
- * involved in a specific operation. The UTP controller CGCs are disabled
- * by default, and this function enables them (after every UFS link
- * startup) to reduce leakage power.
- */
-static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
-{
- ufshcd_writel(hba,
- ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
- REG_UFS_CFG2);
-
- /* Ensure that HW clock gating is enabled before next operations */
- mb();
-}
-
-static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
-
- switch (status) {
- case PRE_CHANGE:
- ufs_qcom_power_up_sequence(hba);
- /*
- * The PHY PLL output is the source of tx/rx lane symbol
- * clocks, hence, enable the lane clocks only after PHY
- * is initialized.
- */
- err = ufs_qcom_enable_lane_clks(host);
- break;
- case POST_CHANGE:
- /* check if UFS PHY moved from DISABLED to HIBERN8 */
- err = ufs_qcom_check_hibern8(hba);
- ufs_qcom_enable_hw_clk_gating(hba);
- ufs_qcom_ice_enable(host);
- break;
- default:
- dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
- err = -EINVAL;
- break;
- }
- return err;
-}
-
-/*
- * Returns zero for success and non-zero in case of a failure
- */
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer)
-{
- int ret = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_clk_info *clki;
- u32 core_clk_period_in_ns;
- u32 tx_clk_cycles_per_us = 0;
- unsigned long core_clk_rate = 0;
- u32 core_clk_cycles_per_us = 0;
-
- static u32 pwm_fr_table[][2] = {
- {UFS_PWM_G1, 0x1},
- {UFS_PWM_G2, 0x1},
- {UFS_PWM_G3, 0x1},
- {UFS_PWM_G4, 0x1},
- };
-
- static u32 hs_fr_table_rA[][2] = {
- {UFS_HS_G1, 0x1F},
- {UFS_HS_G2, 0x3e},
- {UFS_HS_G3, 0x7D},
- };
-
- static u32 hs_fr_table_rB[][2] = {
- {UFS_HS_G1, 0x24},
- {UFS_HS_G2, 0x49},
- {UFS_HS_G3, 0x92},
- };
-
- /*
- * The QUniPro controller does not use the following registers:
- * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG and
- * UFS_REG_PA_LINK_STARTUP_TIMER.
- * The UTP controller, however, uses the SYS1CLK_1US_REG register for
- * its interrupt aggregation logic.
- */
- if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
- goto out;
-
- if (gear == 0) {
- dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- goto out_error;
- }
-
- list_for_each_entry(clki, &hba->clk_list_head, list) {
- if (!strcmp(clki->name, "core_clk"))
- core_clk_rate = clk_get_rate(clki->clk);
- }
-
- /* If frequency is smaller than 1MHz, set to 1MHz */
- if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
- core_clk_rate = DEFAULT_CLK_RATE_HZ;
-
- core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
- if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
- ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
- /*
- * make sure above write gets applied before we return from
- * this function.
- */
- mb();
- }
-
- if (ufs_qcom_cap_qunipro(host))
- goto out;
-
- core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
- core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
- core_clk_period_in_ns &= MASK_CLK_NS_REG;
-
- switch (hs) {
- case FASTAUTO_MODE:
- case FAST_MODE:
- if (rate == PA_HS_MODE_A) {
- if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rA));
- goto out_error;
- }
- tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
- } else if (rate == PA_HS_MODE_B) {
- if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rB));
- goto out_error;
- }
- tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
- } else {
- dev_err(hba->dev, "%s: invalid rate = %d\n",
- __func__, rate);
- goto out_error;
- }
- break;
- case SLOWAUTO_MODE:
- case SLOW_MODE:
- if (gear > ARRAY_SIZE(pwm_fr_table)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(pwm_fr_table));
- goto out_error;
- }
- tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
- break;
- case UNCHANGED:
- default:
- dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- goto out_error;
- }
-
- if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
- (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
- /* these two register fields shall be written at once */
- ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
- REG_UFS_TX_SYMBOL_CLK_NS_US);
- /*
- * make sure above write gets applied before we return from
- * this function.
- */
- mb();
- }
-
- if (update_link_startup_timer) {
- ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
- REG_UFS_PA_LINK_STARTUP_TIMER);
- /*
- * make sure that this configuration is applied before
- * we return
- */
- mb();
- }
- goto out;
-
-out_error:
- ret = -EINVAL;
-out:
- return ret;
-}
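-
-/*
- * A worked example of the values computed above, assuming a hypothetical
- * 150 MHz core_clk and an HS-G2 rate B power mode:
- *
- *   core_clk_cycles_per_us = 150000000 / USEC_PER_SEC = 150
- *   core_clk_period_in_ns  = 1000000000 / 150000000 = 6 (truncated), then
- *                            (6 << OFFSET_CLK_NS_REG) & MASK_CLK_NS_REG
- *                            = 0x1800
- *   tx_clk_cycles_per_us   = hs_fr_table_rB[UFS_HS_G2 - 1][1] = 0x49
- *
- * so REG_UFS_SYS1CLK_1US is written with 150 and
- * REG_UFS_TX_SYMBOL_CLK_NS_US with 0x1800 | 0x49 = 0x1849.
- */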
-
-static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int err = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- switch (status) {
- case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
- 0, true)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- err = -EINVAL;
- goto out;
- }
-
- if (ufs_qcom_cap_qunipro(host))
- /*
- * set unipro core clock cycles to 150 & clear clock
- * divider
- */
- err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
- 150);
-
- /*
- * Some UFS devices (and possibly the host) have issues if LCC is
- * enabled. So set PA_Local_TX_LCC_Enable to 0 before link startup,
- * which makes sure that both host and device TX LCC are disabled
- * once link startup is completed.
- */
- if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
- err = ufshcd_disable_host_tx_lcc(hba);
-
- break;
- case POST_CHANGE:
- ufs_qcom_link_startup_post_change(hba);
- break;
- default:
- break;
- }
-
-out:
- return err;
-}
-
-static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- /* reset gpio is optional */
- if (!host->device_reset)
- return;
-
- gpiod_set_value_cansleep(host->device_reset, asserted);
-}
-
-static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
- enum ufs_notify_change_status status)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
-
- if (status == PRE_CHANGE)
- return 0;
-
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
-
- /* reset the connected UFS device during power down */
- ufs_qcom_device_reset_ctrl(hba, true);
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- }
-
- return 0;
-}
-
-static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int err;
-
- if (ufs_qcom_is_link_off(hba)) {
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed PHY power on: %d\n",
- __func__, err);
- return err;
- }
-
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
-
- } else if (!ufs_qcom_is_link_active(hba)) {
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- return err;
- }
-
- err = ufs_qcom_ice_resume(host);
- if (err)
- return err;
-
- hba->is_sys_suspended = false;
- return 0;
-}
-
-static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
-{
- if (host->dev_ref_clk_ctrl_mmio &&
- (enable ^ host->is_dev_ref_clk_enabled)) {
- u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
-
- if (enable)
- temp |= host->dev_ref_clk_en_mask;
- else
- temp &= ~host->dev_ref_clk_en_mask;
-
- /*
- * If we are here to disable this clock, it might be immediately
- * after entering hibern8, in which case we need to make sure that
- * the device ref_clk stays active for a specific time after
- * hibern8 entry.
- */
- if (!enable) {
- unsigned long gating_wait;
-
- gating_wait = host->hba->dev_info.clk_gating_wait_us;
- if (!gating_wait) {
- udelay(1);
- } else {
- /*
- * bRefClkGatingWaitTime defines the minimum
- * time for which the reference clock is
- * required by the device during the transition
- * from HS-MODE to LS-MODE or the HIBERN8 state.
- * Add a little extra delay to be on the safe side.
- */
- gating_wait += 10;
- usleep_range(gating_wait, gating_wait + 10);
- }
- }
-
- writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
-
- /* ensure that ref_clk is enabled/disabled before we return */
- wmb();
-
- /*
- * If we call hibern8 exit after this, we need to make sure that
- * device ref_clk is stable for at least 1us before the hibern8
- * exit command.
- */
- if (enable)
- udelay(1);
-
- host->is_dev_ref_clk_enabled = enable;
- }
-}
-
-static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_dev_params ufs_qcom_cap;
- int ret = 0;
-
- if (!dev_req_params) {
- pr_err("%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- switch (status) {
- case PRE_CHANGE:
- ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
- ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
-
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not work reliably on legacy QCOM
- * UFS host controller hardware, even though the capability
- * exchange during the link startup phase may end up
- * negotiating G3 as the maximum supported gear.
- * Hence, downgrade the maximum supported gear to HS-G2.
- */
- if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
- if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
- }
-
- ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
- dev_max_params,
- dev_req_params);
- if (ret) {
- pr_err("%s: failed to determine capabilities\n",
- __func__);
- goto out;
- }
-
- /* enable the device ref clock before changing to HS mode */
- if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
- ufshcd_is_hs_mode(dev_req_params))
- ufs_qcom_dev_ref_clk_ctrl(host, true);
-
- if (host->hw_ver.major >= 0x4) {
- ufshcd_dme_configure_adapt(hba,
- dev_req_params->gear_tx,
- PA_INITIAL_ADAPT);
- }
- break;
- case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate, false)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- /*
- * We return the error code at the end of the routine,
- * but continue to configure UFS_PHY_TX_LANE_ENABLE
- * and bus voting as usual.
- */
- ret = -EINVAL;
- }
-
- /* cache the power mode parameters to use internally */
- memcpy(&host->dev_req_params,
- dev_req_params, sizeof(*dev_req_params));
-
- /* disable the device ref clock if entered PWM mode */
- if (ufshcd_is_hs_mode(&hba->pwr_info) &&
- !ufshcd_is_hs_mode(dev_req_params))
- ufs_qcom_dev_ref_clk_ctrl(host, false);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-out:
- return ret;
-}
-
-static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
-{
- int err;
- u32 pa_vs_config_reg1;
-
- err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
- &pa_vs_config_reg1);
- if (err)
- goto out;
-
- /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
- err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
- (pa_vs_config_reg1 | (1 << 12)));
-
-out:
- return err;
-}
-
-static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
-{
- int err = 0;
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
- err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
-
- if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
- hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
-
- return err;
-}
-
-static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (host->hw_ver.major == 0x1)
- return ufshci_version(1, 1);
- else
- return ufshci_version(2, 0);
-}
-
-/**
- * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
- * @hba: host controller instance
- *
- * The QCOM UFS host controller may have non-standard behaviours (quirks)
- * compared to what the UFSHCI specification requires. Advertise all such
- * quirks to the standard UFS host controller driver so that it takes them
- * into account.
- */
-static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (host->hw_ver.major == 0x01) {
- hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
-
- if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
- hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
-
- hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
- }
-
- if (host->hw_ver.major == 0x2) {
- hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
-
- if (!ufs_qcom_cap_qunipro(host))
- /* Legacy UniPro mode still needs the following quirks */
- hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
- }
-}
-
-static void ufs_qcom_set_caps(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- hba->caps |= UFSHCD_CAP_CLK_SCALING;
- hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
- hba->caps |= UFSHCD_CAP_WB_EN;
- hba->caps |= UFSHCD_CAP_CRYPTO;
- hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
-
- if (host->hw_ver.major >= 0x2) {
- host->caps = UFS_QCOM_CAP_QUNIPRO |
- UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
- }
-}
-
-/**
- * ufs_qcom_setup_clocks - enables/disable clocks
- * @hba: host controller instance
- * @on: If true, enable clocks else disable them.
- * @status: PRE_CHANGE or POST_CHANGE notify
- *
- * Returns 0 on success, non-zero on failure.
- */
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
- enum ufs_notify_change_status status)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- /*
- * If ufs_qcom_init() is not done yet, simply bail out.
- * ufs_qcom_setup_clocks() will be called again from
- * ufs_qcom_init() once initialization is done.
- */
- if (!host)
- return 0;
-
- switch (status) {
- case PRE_CHANGE:
- if (!on) {
- if (!ufs_qcom_is_link_active(hba)) {
- /* disable device ref_clk */
- ufs_qcom_dev_ref_clk_ctrl(host, false);
- }
- }
- break;
- case POST_CHANGE:
- if (on) {
- /* enable the device ref clock for HS mode */
- if (ufshcd_is_hs_mode(&hba->pwr_info))
- ufs_qcom_dev_ref_clk_ctrl(host, true);
- }
- break;
- }
-
- return 0;
-}
-
-static int
-ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
-{
- struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
-
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
- ufs_qcom_assert_reset(host->hba);
- /* provide 1ms delay to let the reset pulse propagate. */
- usleep_range(1000, 1100);
- return 0;
-}
-
-static int
-ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
-{
- struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
-
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
- ufs_qcom_deassert_reset(host->hba);
-
- /*
- * After reset deassertion, the PHY needs all ref clocks, voltages
- * and currents to settle down before the serdes can start.
- */
- usleep_range(1000, 1100);
- return 0;
-}
-
-static const struct reset_control_ops ufs_qcom_reset_ops = {
- .assert = ufs_qcom_reset_assert,
- .deassert = ufs_qcom_reset_deassert,
-};
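-
-/*
- * A minimal consumer-side sketch (not part of this driver; the function
- * name is illustrative): a device whose DT node references the reset
- * controller registered in ufs_qcom_init() below reaches the two callbacks
- * above through the standard reset framework.
- */
-static int example_toggle_ufs_phy_reset(struct device *dev)
-{
- struct reset_control *rst;
- int ret;
-
- /* resolved against host->rcdev via the consumer's "resets" property */
- rst = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(rst))
- return PTR_ERR(rst);
-
- ret = reset_control_assert(rst); /* -> ufs_qcom_reset_assert() */
- if (ret)
- return ret;
-
- return reset_control_deassert(rst); /* -> ufs_qcom_reset_deassert() */
-}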
-
-#define ANDROID_BOOT_DEV_MAX 30
-static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
-
-#ifndef MODULE
-static int __init get_android_boot_dev(char *str)
-{
- strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
- return 1;
-}
-__setup("androidboot.bootdevice=", get_android_boot_dev);
-#endif
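-
-/*
- * For example (the device name here is purely illustrative), booting with
- * "androidboot.bootdevice=1d84000.ufshc" on the kernel command line makes
- * ufs_qcom_init() below return -ENODEV for every UFS host whose dev_name()
- * does not match, so only the named boot device is probed.
- */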
-
-/**
- * ufs_qcom_init - bind phy with controller
- * @hba: host controller instance
- *
- * Binds the PHY with the controller and powers up the PHY, enabling
- * its clocks and regulators.
- *
- * Returns -EPROBE_DEFER if binding fails, a negative error code on
- * PHY power-up failure, and zero on success.
- */
-static int ufs_qcom_init(struct ufs_hba *hba)
-{
- int err;
- struct device *dev = hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct ufs_qcom_host *host;
- struct resource *res;
- struct ufs_clk_info *clki;
-
- if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
- return -ENODEV;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- err = -ENOMEM;
- dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
- goto out;
- }
-
- /* Make a two-way bind between the qcom host and the hba */
- host->hba = hba;
- ufshcd_set_variant(hba, host);
-
- /* Setup the reset control of HCI */
- host->core_reset = devm_reset_control_get(hba->dev, "rst");
- if (IS_ERR(host->core_reset)) {
- err = PTR_ERR(host->core_reset);
- dev_warn(dev, "Failed to get reset control %d\n", err);
- host->core_reset = NULL;
- err = 0;
- }
-
- /* Fire up the reset controller. Failure here is non-fatal. */
- host->rcdev.of_node = dev->of_node;
- host->rcdev.ops = &ufs_qcom_reset_ops;
- host->rcdev.owner = dev->driver->owner;
- host->rcdev.nr_resets = 1;
- err = devm_reset_controller_register(dev, &host->rcdev);
- if (err) {
- dev_warn(dev, "Failed to register reset controller\n");
- err = 0;
- }
-
- /*
- * Voting/de-voting the device ref_clk source is time consuming,
- * hence skip de-voting it during aggressive clock gating. This
- * clock will still be gated off during runtime suspend.
- */
- host->generic_phy = devm_phy_get(dev, "ufsphy");
-
- if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
- /*
- * The UFS driver might be probed before the PHY driver.
- * In that case, return -EPROBE_DEFER.
- */
- err = -EPROBE_DEFER;
- dev_warn(dev, "%s: required phy hasn't probed yet, err = %d\n",
- __func__, err);
- goto out_variant_clear;
- } else if (IS_ERR(host->generic_phy)) {
- if (has_acpi_companion(dev)) {
- host->generic_phy = NULL;
- } else {
- err = PTR_ERR(host->generic_phy);
- dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out_variant_clear;
- }
- }
-
- host->device_reset = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_HIGH);
- if (IS_ERR(host->device_reset)) {
- err = PTR_ERR(host->device_reset);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to acquire reset gpio: %d\n", err);
- goto out_variant_clear;
- }
-
- ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
- &host->hw_ver.minor, &host->hw_ver.step);
-
- /*
- * For newer controllers, the device reference clock control bit has
- * moved into the UFS controller's own register address space.
- */
- if (host->hw_ver.major >= 0x02) {
- host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
- host->dev_ref_clk_en_mask = BIT(26);
- } else {
- /* "dev_ref_clk_ctrl_mem" is optional resource */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "dev_ref_clk_ctrl_mem");
- if (res) {
- host->dev_ref_clk_ctrl_mmio =
- devm_ioremap_resource(dev, res);
- if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
- host->dev_ref_clk_ctrl_mmio = NULL;
- host->dev_ref_clk_en_mask = BIT(5);
- }
- }
-
- list_for_each_entry(clki, &hba->clk_list_head, list) {
- if (!strcmp(clki->name, "core_clk_unipro"))
- clki->keep_link_active = true;
- }
-
- err = ufs_qcom_init_lane_clks(host);
- if (err)
- goto out_variant_clear;
-
- ufs_qcom_set_caps(hba);
- ufs_qcom_advertise_quirks(hba);
-
- err = ufs_qcom_ice_init(host);
- if (err)
- goto out_variant_clear;
-
- ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
-
- if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
- ufs_qcom_hosts[hba->dev->id] = host;
-
- host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
- ufs_qcom_get_default_testbus_cfg(host);
- err = ufs_qcom_testbus_config(host);
- if (err) {
- dev_warn(dev, "%s: failed to configure the testbus %d\n",
- __func__, err);
- err = 0;
- }
-
- goto out;
-
-out_variant_clear:
- ufshcd_set_variant(hba, NULL);
-out:
- return err;
-}
-
-static void ufs_qcom_exit(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(host->generic_phy);
- phy_exit(host->generic_phy);
-}
-
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles)
-{
- int err;
- u32 core_clk_ctrl_reg;
-
- if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
- return -EINVAL;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- &core_clk_ctrl_reg);
- if (err)
- goto out;
-
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
- core_clk_ctrl_reg |= clk_cycles;
-
- /* Clear CORE_CLK_DIV_EN */
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
-
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- core_clk_ctrl_reg);
-out:
- return err;
-}
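-
-/*
- * The clk_cycles argument is the number of unipro core clock cycles per
- * microsecond, i.e. effectively the core clock frequency in MHz: the
- * value 150 used when scaling up corresponds to a 150 MHz clock and the
- * value 75 used when scaling down to a 75 MHz clock; both fit in the
- * 8-bit DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK field checked
- * at the top of the function.
- */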
-
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
-{
- /* nothing to do as of now */
- return 0;
-}
-
-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
- /* set unipro core clock cycles to 150 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
-}
-
-static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err;
- u32 core_clk_ctrl_reg;
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- &core_clk_ctrl_reg);
-
- /* make sure CORE_CLK_DIV_EN is cleared */
- if (!err &&
- (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- core_clk_ctrl_reg);
- }
-
- return err;
-}
-
-static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
-
- /* set unipro core clock cycles to 75 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
-}
-
-static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
- bool scale_up, enum ufs_notify_change_status status)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
- int err = 0;
-
- if (status == PRE_CHANGE) {
- err = ufshcd_uic_hibern8_enter(hba);
- if (err)
- return err;
- if (scale_up)
- err = ufs_qcom_clk_scale_up_pre_change(hba);
- else
- err = ufs_qcom_clk_scale_down_pre_change(hba);
- if (err)
- ufshcd_uic_hibern8_exit(hba);
-
- } else {
- if (scale_up)
- err = ufs_qcom_clk_scale_up_post_change(hba);
- else
- err = ufs_qcom_clk_scale_down_post_change(hba);
-
- if (err || !dev_req_params) {
- ufshcd_uic_hibern8_exit(hba);
- goto out;
- }
-
- ufs_qcom_cfg_timers(hba,
- dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate,
- false);
- ufshcd_uic_hibern8_exit(hba);
- }
-
-out:
- return err;
-}
-
-static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
- void *priv, void (*print_fn)(struct ufs_hba *hba,
- int offset, int num_regs, const char *str, void *priv))
-{
- u32 reg;
- struct ufs_qcom_host *host;
-
- if (unlikely(!hba)) {
- pr_err("%s: hba is NULL\n", __func__);
- return;
- }
- if (unlikely(!print_fn)) {
- dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
- return;
- }
-
- host = ufshcd_get_variant(hba);
- if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
- return;
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
- print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
-
- reg = ufshcd_readl(hba, REG_UFS_CFG1);
- reg |= UTP_DBG_RAMS_EN;
- ufshcd_writel(hba, reg, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
- print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
- print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
- print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
-
- /* clear bit 17 - UTP_DBG_RAMS_EN */
- ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
- print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
- print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
- print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
- print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
- print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
-}
-
-static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
-{
- if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
- UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
- } else {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
- }
-}
-
-static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
-{
- /* provide a legal default configuration */
- host->testbus.select_major = TSTBUS_UNIPRO;
- host->testbus.select_minor = 37;
-}
-
-static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
-{
- if (host->testbus.select_major >= TSTBUS_MAX) {
- dev_err(host->hba->dev,
- "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
- __func__, host->testbus.select_major);
- return false;
- }
-
- return true;
-}
-
-int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
-{
- int reg;
- int offset;
- u32 mask = TEST_BUS_SUB_SEL_MASK;
-
- if (!host)
- return -EINVAL;
-
- if (!ufs_qcom_testbus_cfg_is_ok(host))
- return -EPERM;
-
- switch (host->testbus.select_major) {
- case TSTBUS_UAWM:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 24;
- break;
- case TSTBUS_UARM:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 16;
- break;
- case TSTBUS_TXUC:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 8;
- break;
- case TSTBUS_RXUC:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 0;
- break;
- case TSTBUS_DFC:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 24;
- break;
- case TSTBUS_TRLUT:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 16;
- break;
- case TSTBUS_TMRLUT:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 8;
- break;
- case TSTBUS_OCSC:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 0;
- break;
- case TSTBUS_WRAPPER:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 16;
- break;
- case TSTBUS_COMBINED:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 8;
- break;
- case TSTBUS_UTP_HCI:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 0;
- break;
- case TSTBUS_UNIPRO:
- reg = UFS_UNIPRO_CFG;
- offset = 20;
- mask = 0xFFF;
- break;
- /*
- * No need for a default case, since
- * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
- * is legal
- */
- }
- mask <<= offset;
- ufshcd_rmwl(host->hba, TEST_BUS_SEL,
- (u32)host->testbus.select_major << 19,
- REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, mask,
- (u32)host->testbus.select_minor << offset,
- reg);
- ufs_qcom_enable_test_bus(host);
- /*
- * Make sure the test bus configuration is
- * committed before returning.
- */
- mb();
-
- return 0;
-}
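-
-/*
- * A worked example using the defaults from ufs_qcom_get_default_testbus_cfg()
- * (select_major = TSTBUS_UNIPRO, select_minor = 37): the switch above picks
- * reg = UFS_UNIPRO_CFG, offset = 20 and mask = 0xFFF, so 37 (0x25) lands in
- * bits [31:20] of UFS_UNIPRO_CFG, while TSTBUS_UNIPRO is written into the
- * TEST_BUS_SEL field (bits [22:19]) of REG_UFS_CFG1 before the test bus is
- * enabled.
- */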
-
-static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
- "HCI Vendor Specific Registers ");
-
- ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
-}
-
-/**
- * ufs_qcom_device_reset() - toggle the (optional) device reset line
- * @hba: per-adapter instance
- *
- * Toggles the (optional) reset line to reset the attached device.
- */
-static int ufs_qcom_device_reset(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- /* reset gpio is optional */
- if (!host->device_reset)
- return -EOPNOTSUPP;
-
- /*
- * The UFS device shall detect reset pulses of 1us; sleep for 10us to
- * be on the safe side.
- */
- ufs_qcom_device_reset_ctrl(hba, true);
- usleep_range(10, 15);
-
- ufs_qcom_device_reset_ctrl(hba, false);
- usleep_range(10, 15);
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile *p,
- void *data)
-{
- static struct devfreq_simple_ondemand_data *d;
-
- if (!data)
- return;
-
- d = (struct devfreq_simple_ondemand_data *)data;
- p->polling_ms = 60;
- d->upthreshold = 70;
- d->downdifferential = 5;
-}
-#else
-static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile *p,
- void *data)
-{
-}
-#endif
-
-/*
- * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
- *
- * The variant operations configure the necessary controller and PHY
- * handshake during initialization.
- */
-static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
- .name = "qcom",
- .init = ufs_qcom_init,
- .exit = ufs_qcom_exit,
- .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
- .clk_scale_notify = ufs_qcom_clk_scale_notify,
- .setup_clocks = ufs_qcom_setup_clocks,
- .hce_enable_notify = ufs_qcom_hce_enable_notify,
- .link_startup_notify = ufs_qcom_link_startup_notify,
- .pwr_change_notify = ufs_qcom_pwr_change_notify,
- .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
- .suspend = ufs_qcom_suspend,
- .resume = ufs_qcom_resume,
- .dbg_register_dump = ufs_qcom_dump_dbg_regs,
- .device_reset = ufs_qcom_device_reset,
- .config_scaling_param = ufs_qcom_config_scaling_param,
- .program_key = ufs_qcom_ice_program_key,
-};
-
-/**
- * ufs_qcom_probe - probe routine of the driver
- * @pdev: pointer to platform device handle
- *
- * Returns zero for success and non-zero for failure.
- */
-static int ufs_qcom_probe(struct platform_device *pdev)
-{
- int err;
- struct device *dev = &pdev->dev;
-
- /* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
- if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
-
- return err;
-}
-
-/**
- * ufs_qcom_remove - set driver_data of the device to NULL
- * @pdev: pointer to platform device handle
- *
- * Always returns 0
- */
-static int ufs_qcom_remove(struct platform_device *pdev)
-{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
- return 0;
-}
-
-static const struct of_device_id ufs_qcom_of_match[] = {
- { .compatible = "qcom,ufshc"},
- {},
-};
-MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id ufs_qcom_acpi_match[] = {
- { "QCOM24A5" },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
-#endif
-
-static const struct dev_pm_ops ufs_qcom_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
- .prepare = ufshcd_suspend_prepare,
- .complete = ufshcd_resume_complete,
-};
-
-static struct platform_driver ufs_qcom_pltform = {
- .probe = ufs_qcom_probe,
- .remove = ufs_qcom_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "ufshcd-qcom",
- .pm = &ufs_qcom_pm_ops,
- .of_match_table = of_match_ptr(ufs_qcom_of_match),
- .acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
- },
-};
-module_platform_driver(ufs_qcom_pltform);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
deleted file mode 100644
index 8208e3a3ef59..000000000000
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef UFS_QCOM_H_
-#define UFS_QCOM_H_
-
-#include <linux/reset-controller.h>
-#include <linux/reset.h>
-
-#define MAX_UFS_QCOM_HOSTS 1
-#define MAX_U32 (~(u32)0)
-#define MPHY_TX_FSM_STATE 0x41
-#define TX_FSM_HIBERN8 0x1
-#define HBRN8_POLL_TOUT_MS 100
-#define DEFAULT_CLK_RATE_HZ 1000000
-#define BUS_VECTOR_NAME_LEN 32
-
-#define UFS_HW_VER_MAJOR_SHFT (28)
-#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
-#define UFS_HW_VER_MINOR_SHFT (16)
-#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
-#define UFS_HW_VER_STEP_SHFT (0)
-#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
-
-/* vendor specific pre-defined parameters */
-#define SLOW 1
-#define FAST 2
-
-#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
-
-/* QCOM UFS host controller vendor specific registers */
-enum {
- REG_UFS_SYS1CLK_1US = 0xC0,
- REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
- REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
- REG_UFS_PA_ERR_CODE = 0xCC,
- REG_UFS_RETRY_TIMER_REG = 0xD0,
- REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
- REG_UFS_CFG1 = 0xDC,
- REG_UFS_CFG2 = 0xE0,
- REG_UFS_HW_VERSION = 0xE4,
-
- UFS_TEST_BUS = 0xE8,
- UFS_TEST_BUS_CTRL_0 = 0xEC,
- UFS_TEST_BUS_CTRL_1 = 0xF0,
- UFS_TEST_BUS_CTRL_2 = 0xF4,
- UFS_UNIPRO_CFG = 0xF8,
-
- /*
- * QCOM UFS host controller vendor specific registers
- * added in HW Version 3.0.0
- */
- UFS_AH8_CFG = 0xFC,
-};
-
-/* QCOM UFS host controller vendor specific debug registers */
-enum {
- UFS_DBG_RD_REG_UAWM = 0x100,
- UFS_DBG_RD_REG_UARM = 0x200,
- UFS_DBG_RD_REG_TXUC = 0x300,
- UFS_DBG_RD_REG_RXUC = 0x400,
- UFS_DBG_RD_REG_DFC = 0x500,
- UFS_DBG_RD_REG_TRLUT = 0x600,
- UFS_DBG_RD_REG_TMRLUT = 0x700,
- UFS_UFS_DBG_RD_REG_OCSC = 0x800,
-
- UFS_UFS_DBG_RD_DESC_RAM = 0x1500,
- UFS_UFS_DBG_RD_PRDT_RAM = 0x1700,
- UFS_UFS_DBG_RD_RESP_RAM = 0x1800,
- UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
-};
-
-#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
-#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
-
-/* bit definitions for REG_UFS_CFG1 register */
-#define QUNIPRO_SEL 0x1
-#define UTP_DBG_RAMS_EN 0x20000
-#define TEST_BUS_EN BIT(18)
-#define TEST_BUS_SEL GENMASK(22, 19)
-#define UFS_REG_TEST_BUS_EN BIT(30)
-
-/* bit definitions for REG_UFS_CFG2 register */
-#define UAWM_HW_CGC_EN (1 << 0)
-#define UARM_HW_CGC_EN (1 << 1)
-#define TXUC_HW_CGC_EN (1 << 2)
-#define RXUC_HW_CGC_EN (1 << 3)
-#define DFC_HW_CGC_EN (1 << 4)
-#define TRLUT_HW_CGC_EN (1 << 5)
-#define TMRLUT_HW_CGC_EN (1 << 6)
-#define OCSC_HW_CGC_EN (1 << 7)
-
-/* bit definitions for UFS_UFS_TEST_BUS_CTRL_n */
-#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */
-
-#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
- TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
- DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
- TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
-
-/* bit offset */
-enum {
- OFFSET_UFS_PHY_SOFT_RESET = 1,
- OFFSET_CLK_NS_REG = 10,
-};
-
-/* bit masks */
-enum {
- MASK_UFS_PHY_SOFT_RESET = 0x2,
- MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
- MASK_CLK_NS_REG = 0xFFFC00,
-};
-
-/* QCOM UFS debug print bit mask */
-#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
-#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
-#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN BIT(2)
-
-#define UFS_QCOM_DBG_PRINT_ALL \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \
- UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
-
-/* QUniPro Vendor specific attributes */
-#define PA_VS_CONFIG_REG1 0x9000
-#define DME_VS_CORE_CLK_CTRL 0xD002
-/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
-#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
-#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF
-
-static inline void
-ufs_qcom_get_controller_revision(struct ufs_hba *hba,
- u8 *major, u16 *minor, u16 *step)
-{
- u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
-
- *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
- *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
- *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
-}
-
-static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
-{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
-
- /*
- * Make sure assertion of ufs phy reset is written to
- * register before returning
- */
- mb();
-}
-
-static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
-{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
-
- /*
- * Make sure de-assertion of ufs phy reset is written to
- * register before returning
- */
- mb();
-}
-
-/* Host controller hardware version: major.minor.step */
-struct ufs_hw_version {
- u16 step;
- u16 minor;
- u8 major;
-};
-
-struct ufs_qcom_testbus {
- u8 select_major;
- u8 select_minor;
-};
-
-struct gpio_desc;
-
-struct ufs_qcom_host {
- /*
- * Set this capability if the host controller supports QUniPro mode
- * and the driver wants the host controller to operate in it.
- * Note: by default this capability is kept enabled if the host
- * controller supports QUniPro mode.
- */
- #define UFS_QCOM_CAP_QUNIPRO 0x1
-
- /*
- * Set this capability if the host controller can retain the secure
- * configuration even after a UFS controller core power collapse.
- */
- #define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
- u32 caps;
-
- struct phy *generic_phy;
- struct ufs_hba *hba;
- struct ufs_pa_layer_attr dev_req_params;
- struct clk *rx_l0_sync_clk;
- struct clk *tx_l0_sync_clk;
- struct clk *rx_l1_sync_clk;
- struct clk *tx_l1_sync_clk;
- bool is_lane_clks_enabled;
-
- void __iomem *dev_ref_clk_ctrl_mmio;
- bool is_dev_ref_clk_enabled;
- struct ufs_hw_version hw_ver;
-#ifdef CONFIG_SCSI_UFS_CRYPTO
- void __iomem *ice_mmio;
-#endif
-
- u32 dev_ref_clk_en_mask;
-
- /* Bitmask for enabling debug prints */
- u32 dbg_print_en;
- struct ufs_qcom_testbus testbus;
-
- /* Reset control of HCI */
- struct reset_control *core_reset;
- struct reset_controller_dev rcdev;
-
- struct gpio_desc *device_reset;
-};
-
-static inline u32
-ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
-{
- if (host->hw_ver.major <= 0x02)
- return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
-
- return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
-}
-
-#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
-#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
-#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
-
-int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
-
-static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
-{
- return host->caps & UFS_QCOM_CAP_QUNIPRO;
-}
-
-/* ufs-qcom-ice.c */
-
-#ifdef CONFIG_SCSI_UFS_CRYPTO
-int ufs_qcom_ice_init(struct ufs_qcom_host *host);
-int ufs_qcom_ice_enable(struct ufs_qcom_host *host);
-int ufs_qcom_ice_resume(struct ufs_qcom_host *host);
-int ufs_qcom_ice_program_key(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot);
-#else
-static inline int ufs_qcom_ice_init(struct ufs_qcom_host *host)
-{
- return 0;
-}
-static inline int ufs_qcom_ice_enable(struct ufs_qcom_host *host)
-{
- return 0;
-}
-static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
-{
- return 0;
-}
-#define ufs_qcom_ice_program_key NULL
-#endif /* !CONFIG_SCSI_UFS_CRYPTO */
-
-#endif /* UFS_QCOM_H_ */
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
deleted file mode 100644
index 5c405ff7b6ea..000000000000
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ /dev/null
@@ -1,1267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Western Digital Corporation
-
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/bitfield.h>
-#include <asm/unaligned.h>
-
-#include "ufs.h"
-#include "ufs-sysfs.h"
-
-static const char *ufshcd_uic_link_state_to_string(
- enum uic_link_state state)
-{
- switch (state) {
- case UIC_LINK_OFF_STATE: return "OFF";
- case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
- case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
- case UIC_LINK_BROKEN_STATE: return "BROKEN";
- default: return "UNKNOWN";
- }
-}
-
-static const char *ufshcd_ufs_dev_pwr_mode_to_string(
- enum ufs_dev_pwr_mode state)
-{
- switch (state) {
- case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
- case UFS_SLEEP_PWR_MODE: return "SLEEP";
- case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
- case UFS_DEEPSLEEP_PWR_MODE: return "DEEPSLEEP";
- default: return "UNKNOWN";
- }
-}
-
-static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count,
- bool rpm)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_dev_info *dev_info = &hba->dev_info;
- unsigned long flags, value;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- if (value >= UFS_PM_LVL_MAX)
- return -EINVAL;
-
- if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
- (!(hba->caps & UFSHCD_CAP_DEEPSLEEP) ||
- !(dev_info->wspecversion >= 0x310)))
- return -EINVAL;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (rpm)
- hba->rpm_lvl = value;
- else
- hba->spm_lvl = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return count;
-}
-
-static ssize_t rpm_lvl_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
-}
-
-static ssize_t rpm_lvl_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true);
-}
-
-static ssize_t rpm_target_dev_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
- ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
-}
-
-static ssize_t rpm_target_link_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
- ufs_pm_lvl_states[hba->rpm_lvl].link_state));
-}
-
-static ssize_t spm_lvl_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%d\n", hba->spm_lvl);
-}
-
-static ssize_t spm_lvl_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false);
-}
-
-static ssize_t spm_target_dev_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
- ufs_pm_lvl_states[hba->spm_lvl].dev_state));
-}
-
-static ssize_t spm_target_link_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
- ufs_pm_lvl_states[hba->spm_lvl].link_state));
-}
-
-/* Convert Auto-Hibernate Idle Timer register value to microseconds */
-static int ufshcd_ahit_to_us(u32 ahit)
-{
- int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit);
- int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit);
-
- for (; scale > 0; --scale)
- timer *= UFSHCI_AHIBERN8_SCALE_FACTOR;
-
- return timer;
-}
-
-/* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufshcd_us_to_ahit(unsigned int timer)
-{
- unsigned int scale;
-
- for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
- timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
-
- return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
-}
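-
-/*
- * A worked round trip through the two helpers above, assuming the 10-bit
- * timer field and decimal scale factor (10) that ufshci.h defines for the
- * AHIT register: ufshcd_us_to_ahit(150000) divides 150000 down to 150 in
- * three steps and encodes timer = 150, scale = 3, which
- * ufshcd_ahit_to_us() multiplies back to exactly 150000 us. Values that
- * do not divide evenly lose precision: 12345 us encodes as timer = 123,
- * scale = 2, which decodes to 12300 us.
- */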
-
-static ssize_t auto_hibern8_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u32 ahit;
- int ret;
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return -EOPNOTSUPP;
-
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- ret = -EBUSY;
- goto out;
- }
-
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
- ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
- ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
-
- ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
-
-out:
- up(&hba->host_sem);
- return ret;
-}
-
-static ssize_t auto_hibern8_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned int timer;
- int ret = 0;
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &timer))
- return -EINVAL;
-
- if (timer > UFSHCI_AHIBERN8_MAX)
- return -EINVAL;
-
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- ret = -EBUSY;
- goto out;
- }
-
- ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));
-
-out:
- up(&hba->host_sem);
- return ret ? ret : count;
-}
-
-static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
-}
-
-static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned int wb_enable;
- ssize_t res;
-
- if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) {
- /*
- * If the platform supports UFSHCD_CAP_CLK_SCALING, WB is
- * toggled while scaling the clocks up/down instead.
- */
- dev_warn(dev, "Controlling WB through wb_on is not allowed!\n");
- return -EOPNOTSUPP;
- }
-
- if (kstrtouint(buf, 0, &wb_enable))
- return -EINVAL;
-
- if (wb_enable != 0 && wb_enable != 1)
- return -EINVAL;
-
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- res = -EBUSY;
- goto out;
- }
-
- ufshcd_rpm_get_sync(hba);
- res = ufshcd_wb_toggle(hba, wb_enable);
- ufshcd_rpm_put_sync(hba);
-out:
- up(&hba->host_sem);
- return res < 0 ? res : count;
-}
-
-static DEVICE_ATTR_RW(rpm_lvl);
-static DEVICE_ATTR_RO(rpm_target_dev_state);
-static DEVICE_ATTR_RO(rpm_target_link_state);
-static DEVICE_ATTR_RW(spm_lvl);
-static DEVICE_ATTR_RO(spm_target_dev_state);
-static DEVICE_ATTR_RO(spm_target_link_state);
-static DEVICE_ATTR_RW(auto_hibern8);
-static DEVICE_ATTR_RW(wb_on);
-
-static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
- &dev_attr_rpm_lvl.attr,
- &dev_attr_rpm_target_dev_state.attr,
- &dev_attr_rpm_target_link_state.attr,
- &dev_attr_spm_lvl.attr,
- &dev_attr_spm_target_dev_state.attr,
- &dev_attr_spm_target_link_state.attr,
- &dev_attr_auto_hibern8.attr,
- &dev_attr_wb_on.attr,
- NULL
-};
-
-static const struct attribute_group ufs_sysfs_default_group = {
- .attrs = ufs_sysfs_ufshcd_attrs,
-};
-
-static ssize_t monitor_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
-}
-
-static ssize_t monitor_enable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long value, flags;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- value = !!value;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (value == hba->monitor.enabled)
- goto out_unlock;
-
- if (!value) {
- memset(&hba->monitor, 0, sizeof(hba->monitor));
- } else {
- hba->monitor.enabled = true;
- hba->monitor.enabled_ts = ktime_get();
- }
-
-out_unlock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return count;
-}
-
-static ssize_t monitor_chunk_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
-}
-
-static ssize_t monitor_chunk_size_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long value, flags;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- /* Only allow chunk size change when monitor is disabled */
- if (!hba->monitor.enabled)
- hba->monitor.chunk_size = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return count;
-}
-
-static ssize_t read_total_sectors_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
-}
-
-static ssize_t read_total_busy_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%llu\n",
- ktime_to_us(hba->monitor.total_busy[READ]));
-}
-
-static ssize_t read_nr_requests_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
-}
-
-static ssize_t read_req_latency_avg_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_hba_monitor *m = &hba->monitor;
-
- return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
- m->nr_req[READ]));
-}
-
-static ssize_t read_req_latency_max_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%llu\n",
- ktime_to_us(hba->monitor.lat_max[READ]));
-}
-
-static ssize_t read_req_latency_min_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%llu\n",
- ktime_to_us(hba->monitor.lat_min[READ]));
-}
-
-static ssize_t read_req_latency_sum_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%llu\n",
- ktime_to_us(hba->monitor.lat_sum[READ]));
-}
-
-static ssize_t write_total_sectors_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
-}
-
-static ssize_t write_total_busy_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%llu\n",
- ktime_to_us(hba->monitor.total_busy[WRITE]));
-}
-
-static ssize_t write_nr_requests_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
-}
-
-static ssize_t write_req_latency_avg_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_hba_monitor *m = &hba->monitor;
-
- return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
- m->nr_req[WRITE]));
-}
-
-static ssize_t write_req_latency_max_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%llu\n",
- ktime_to_us(hba->monitor.lat_max[WRITE]));
-}
-
-static ssize_t write_req_latency_min_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%llu\n",
- ktime_to_us(hba->monitor.lat_min[WRITE]));
-}
-
-static ssize_t write_req_latency_sum_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%llu\n",
- ktime_to_us(hba->monitor.lat_sum[WRITE]));
-}
-
-static DEVICE_ATTR_RW(monitor_enable);
-static DEVICE_ATTR_RW(monitor_chunk_size);
-static DEVICE_ATTR_RO(read_total_sectors);
-static DEVICE_ATTR_RO(read_total_busy);
-static DEVICE_ATTR_RO(read_nr_requests);
-static DEVICE_ATTR_RO(read_req_latency_avg);
-static DEVICE_ATTR_RO(read_req_latency_max);
-static DEVICE_ATTR_RO(read_req_latency_min);
-static DEVICE_ATTR_RO(read_req_latency_sum);
-static DEVICE_ATTR_RO(write_total_sectors);
-static DEVICE_ATTR_RO(write_total_busy);
-static DEVICE_ATTR_RO(write_nr_requests);
-static DEVICE_ATTR_RO(write_req_latency_avg);
-static DEVICE_ATTR_RO(write_req_latency_max);
-static DEVICE_ATTR_RO(write_req_latency_min);
-static DEVICE_ATTR_RO(write_req_latency_sum);
-
-static struct attribute *ufs_sysfs_monitor_attrs[] = {
- &dev_attr_monitor_enable.attr,
- &dev_attr_monitor_chunk_size.attr,
- &dev_attr_read_total_sectors.attr,
- &dev_attr_read_total_busy.attr,
- &dev_attr_read_nr_requests.attr,
- &dev_attr_read_req_latency_avg.attr,
- &dev_attr_read_req_latency_max.attr,
- &dev_attr_read_req_latency_min.attr,
- &dev_attr_read_req_latency_sum.attr,
- &dev_attr_write_total_sectors.attr,
- &dev_attr_write_total_busy.attr,
- &dev_attr_write_nr_requests.attr,
- &dev_attr_write_req_latency_avg.attr,
- &dev_attr_write_req_latency_max.attr,
- &dev_attr_write_req_latency_min.attr,
- &dev_attr_write_req_latency_sum.attr,
- NULL
-};
-
-static const struct attribute_group ufs_sysfs_monitor_group = {
- .name = "monitor",
- .attrs = ufs_sysfs_monitor_attrs,
-};
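
The "monitor" group above surfaces per-direction I/O statistics on the host controller device. As a rough illustration only — the sysfs path below is an assumption and varies by platform — a user-space reader could poll one of these counters like so:

/* Hypothetical user-space example: read one UFS monitor statistic.
 * The device path is platform-specific and assumed here.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/platform/soc/1d84000.ufshc/monitor/read_total_sectors";
	unsigned long long sectors;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &sectors) == 1)
		printf("sectors read while monitoring: %llu\n", sectors);
	fclose(f);
	return 0;
}

The counters only accumulate while monitor_enable is set, so a typical flow is: write 1 to monitor_enable, run the workload, then read the statistics.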
-
-static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- u8 desc_index,
- u8 param_offset,
- u8 *sysfs_buf,
- u8 param_size)
-{
- u8 desc_buf[8] = {0};
- int ret;
-
- if (param_size > 8)
- return -EINVAL;
-
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- ret = -EBUSY;
- goto out;
- }
-
- ufshcd_rpm_get_sync(hba);
- ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
- param_offset, desc_buf, param_size);
- ufshcd_rpm_put_sync(hba);
- if (ret) {
- ret = -EINVAL;
- goto out;
- }
-
- switch (param_size) {
- case 1:
- ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
- break;
- case 2:
- ret = sysfs_emit(sysfs_buf, "0x%04X\n",
- get_unaligned_be16(desc_buf));
- break;
- case 4:
- ret = sysfs_emit(sysfs_buf, "0x%08X\n",
- get_unaligned_be32(desc_buf));
- break;
- case 8:
- ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
- get_unaligned_be64(desc_buf));
- break;
- }
-
-out:
- up(&hba->host_sem);
- return ret;
-}
-
-#define UFS_DESC_PARAM(_name, _puname, _duname, _size) \
-static ssize_t _name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
- 0, _duname##_DESC_PARAM##_puname, buf, _size); \
-} \
-static DEVICE_ATTR_RO(_name)
-
-#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size) \
- UFS_DESC_PARAM(_name, _uname, DEVICE, _size)
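
Each UFS_DEVICE_DESC_PARAM() invocation below stamps out a one-line show function plus a read-only device attribute. As a sketch, the expansion for the 2-byte manufacturer_id parameter is approximately:

/* Approximate expansion of UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2) */
static ssize_t manufacturer_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	/* reads 2 bytes at DEVICE_DESC_PARAM_MANF_ID, emitted as "0x%04X\n" */
	return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
					 DEVICE_DESC_PARAM_MANF_ID, buf, 2);
}
static DEVICE_ATTR_RO(manufacturer_id);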
-
-UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1);
-UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1);
-UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1);
-UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1);
-UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1);
-UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1);
-UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1);
-UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1);
-UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1);
-UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1);
-UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1);
-UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1);
-UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1);
-UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1);
-UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2);
-UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2);
-UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2);
-UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1);
-UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2);
-UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1);
-UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1);
-UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
-UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
-UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
-UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
-UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
-UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2);
-UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1);
-UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
-UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
-UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
-UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);
-
-static struct attribute *ufs_sysfs_device_descriptor[] = {
- &dev_attr_device_type.attr,
- &dev_attr_device_class.attr,
- &dev_attr_device_sub_class.attr,
- &dev_attr_protocol.attr,
- &dev_attr_number_of_luns.attr,
- &dev_attr_number_of_wluns.attr,
- &dev_attr_boot_enable.attr,
- &dev_attr_descriptor_access_enable.attr,
- &dev_attr_initial_power_mode.attr,
- &dev_attr_high_priority_lun.attr,
- &dev_attr_secure_removal_type.attr,
- &dev_attr_support_security_lun.attr,
- &dev_attr_bkops_termination_latency.attr,
- &dev_attr_initial_active_icc_level.attr,
- &dev_attr_specification_version.attr,
- &dev_attr_manufacturing_date.attr,
- &dev_attr_manufacturer_id.attr,
- &dev_attr_rtt_capability.attr,
- &dev_attr_rtc_update.attr,
- &dev_attr_ufs_features.attr,
- &dev_attr_ffu_timeout.attr,
- &dev_attr_queue_depth.attr,
- &dev_attr_device_version.attr,
- &dev_attr_number_of_secure_wpa.attr,
- &dev_attr_psa_max_data_size.attr,
- &dev_attr_psa_state_timeout.attr,
- &dev_attr_hpb_version.attr,
- &dev_attr_hpb_control.attr,
- &dev_attr_ext_feature_sup.attr,
- &dev_attr_wb_presv_us_en.attr,
- &dev_attr_wb_type.attr,
- &dev_attr_wb_shared_alloc_units.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_device_descriptor_group = {
- .name = "device_descriptor",
- .attrs = ufs_sysfs_device_descriptor,
-};
-
-#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size) \
- UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size)
-
-UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2);
-UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2);
-
-static struct attribute *ufs_sysfs_interconnect_descriptor[] = {
- &dev_attr_unipro_version.attr,
- &dev_attr_mphy_version.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = {
- .name = "interconnect_descriptor",
- .attrs = ufs_sysfs_interconnect_descriptor,
-};
-
-#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size) \
- UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size)
-
-UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8);
-UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1);
-UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4);
-UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1);
-UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1);
-UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1);
-UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1);
-UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2);
-UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units,
- _SCM_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor,
- _SCM_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units,
- _NPM_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor,
- _NPM_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units,
- _ENM1_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor,
- _ENM1_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units,
- _ENM2_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor,
- _ENM2_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units,
- _ENM3_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor,
- _ENM3_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
- _ENM4_MAX_NUM_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
- _ENM4_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2);
-UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
-UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
-UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
-UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
-UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);

-
-static struct attribute *ufs_sysfs_geometry_descriptor[] = {
- &dev_attr_raw_device_capacity.attr,
- &dev_attr_max_number_of_luns.attr,
- &dev_attr_segment_size.attr,
- &dev_attr_allocation_unit_size.attr,
- &dev_attr_min_addressable_block_size.attr,
- &dev_attr_optimal_read_block_size.attr,
- &dev_attr_optimal_write_block_size.attr,
- &dev_attr_max_in_buffer_size.attr,
- &dev_attr_max_out_buffer_size.attr,
- &dev_attr_rpmb_rw_size.attr,
- &dev_attr_dyn_capacity_resource_policy.attr,
- &dev_attr_data_ordering.attr,
- &dev_attr_max_number_of_contexts.attr,
- &dev_attr_sys_data_tag_unit_size.attr,
- &dev_attr_sys_data_tag_resource_size.attr,
- &dev_attr_secure_removal_types.attr,
- &dev_attr_memory_types.attr,
- &dev_attr_sys_code_memory_max_alloc_units.attr,
- &dev_attr_sys_code_memory_capacity_adjustment_factor.attr,
- &dev_attr_non_persist_memory_max_alloc_units.attr,
- &dev_attr_non_persist_memory_capacity_adjustment_factor.attr,
- &dev_attr_enh1_memory_max_alloc_units.attr,
- &dev_attr_enh1_memory_capacity_adjustment_factor.attr,
- &dev_attr_enh2_memory_max_alloc_units.attr,
- &dev_attr_enh2_memory_capacity_adjustment_factor.attr,
- &dev_attr_enh3_memory_max_alloc_units.attr,
- &dev_attr_enh3_memory_capacity_adjustment_factor.attr,
- &dev_attr_enh4_memory_max_alloc_units.attr,
- &dev_attr_enh4_memory_capacity_adjustment_factor.attr,
- &dev_attr_hpb_region_size.attr,
- &dev_attr_hpb_number_lu.attr,
- &dev_attr_hpb_subregion_size.attr,
- &dev_attr_hpb_max_active_regions.attr,
- &dev_attr_wb_max_alloc_units.attr,
- &dev_attr_wb_max_wb_luns.attr,
- &dev_attr_wb_buff_cap_adj.attr,
- &dev_attr_wb_sup_red_type.attr,
- &dev_attr_wb_sup_wb_type.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_geometry_descriptor_group = {
- .name = "geometry_descriptor",
- .attrs = ufs_sysfs_geometry_descriptor,
-};
-
-#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size) \
- UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
-
-UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
-UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
-UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
-
-static struct attribute *ufs_sysfs_health_descriptor[] = {
- &dev_attr_eol_info.attr,
- &dev_attr_life_time_estimation_a.attr,
- &dev_attr_life_time_estimation_b.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_health_descriptor_group = {
- .name = "health_descriptor",
- .attrs = ufs_sysfs_health_descriptor,
-};
-
-#define UFS_POWER_DESC_PARAM(_name, _uname, _index) \
-static ssize_t _name##_index##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, \
- PWR_DESC##_uname##_0 + _index * 2, buf, 2); \
-} \
-static DEVICE_ATTR_RO(_name##_index)
-
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14);
-UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14);
-UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15);
-
-static struct attribute *ufs_sysfs_power_descriptor[] = {
- &dev_attr_active_icc_levels_vcc0.attr,
- &dev_attr_active_icc_levels_vcc1.attr,
- &dev_attr_active_icc_levels_vcc2.attr,
- &dev_attr_active_icc_levels_vcc3.attr,
- &dev_attr_active_icc_levels_vcc4.attr,
- &dev_attr_active_icc_levels_vcc5.attr,
- &dev_attr_active_icc_levels_vcc6.attr,
- &dev_attr_active_icc_levels_vcc7.attr,
- &dev_attr_active_icc_levels_vcc8.attr,
- &dev_attr_active_icc_levels_vcc9.attr,
- &dev_attr_active_icc_levels_vcc10.attr,
- &dev_attr_active_icc_levels_vcc11.attr,
- &dev_attr_active_icc_levels_vcc12.attr,
- &dev_attr_active_icc_levels_vcc13.attr,
- &dev_attr_active_icc_levels_vcc14.attr,
- &dev_attr_active_icc_levels_vcc15.attr,
- &dev_attr_active_icc_levels_vccq0.attr,
- &dev_attr_active_icc_levels_vccq1.attr,
- &dev_attr_active_icc_levels_vccq2.attr,
- &dev_attr_active_icc_levels_vccq3.attr,
- &dev_attr_active_icc_levels_vccq4.attr,
- &dev_attr_active_icc_levels_vccq5.attr,
- &dev_attr_active_icc_levels_vccq6.attr,
- &dev_attr_active_icc_levels_vccq7.attr,
- &dev_attr_active_icc_levels_vccq8.attr,
- &dev_attr_active_icc_levels_vccq9.attr,
- &dev_attr_active_icc_levels_vccq10.attr,
- &dev_attr_active_icc_levels_vccq11.attr,
- &dev_attr_active_icc_levels_vccq12.attr,
- &dev_attr_active_icc_levels_vccq13.attr,
- &dev_attr_active_icc_levels_vccq14.attr,
- &dev_attr_active_icc_levels_vccq15.attr,
- &dev_attr_active_icc_levels_vccq20.attr,
- &dev_attr_active_icc_levels_vccq21.attr,
- &dev_attr_active_icc_levels_vccq22.attr,
- &dev_attr_active_icc_levels_vccq23.attr,
- &dev_attr_active_icc_levels_vccq24.attr,
- &dev_attr_active_icc_levels_vccq25.attr,
- &dev_attr_active_icc_levels_vccq26.attr,
- &dev_attr_active_icc_levels_vccq27.attr,
- &dev_attr_active_icc_levels_vccq28.attr,
- &dev_attr_active_icc_levels_vccq29.attr,
- &dev_attr_active_icc_levels_vccq210.attr,
- &dev_attr_active_icc_levels_vccq211.attr,
- &dev_attr_active_icc_levels_vccq212.attr,
- &dev_attr_active_icc_levels_vccq213.attr,
- &dev_attr_active_icc_levels_vccq214.attr,
- &dev_attr_active_icc_levels_vccq215.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_power_descriptor_group = {
- .name = "power_descriptor",
- .attrs = ufs_sysfs_power_descriptor,
-};
-
-#define UFS_STRING_DESCRIPTOR(_name, _pname) \
-static ssize_t _name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- u8 index; \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- int ret; \
- int desc_len = QUERY_DESC_MAX_SIZE; \
- u8 *desc_buf; \
- \
- down(&hba->host_sem); \
- if (!ufshcd_is_user_access_allowed(hba)) { \
- up(&hba->host_sem); \
- return -EBUSY; \
- } \
- desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
- if (!desc_buf) { \
- up(&hba->host_sem); \
- return -ENOMEM; \
- } \
- ufshcd_rpm_get_sync(hba); \
- ret = ufshcd_query_descriptor_retry(hba, \
- UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
- 0, 0, desc_buf, &desc_len); \
- if (ret) { \
- ret = -EINVAL; \
- goto out; \
- } \
- index = desc_buf[DEVICE_DESC_PARAM##_pname]; \
- kfree(desc_buf); \
- desc_buf = NULL; \
- ret = ufshcd_read_string_desc(hba, index, &desc_buf, \
- SD_ASCII_STD); \
- if (ret < 0) \
- goto out; \
- ret = sysfs_emit(buf, "%s\n", desc_buf); \
-out: \
- ufshcd_rpm_put_sync(hba); \
- kfree(desc_buf); \
- up(&hba->host_sem); \
- return ret; \
-} \
-static DEVICE_ATTR_RO(_name)
-
-UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME);
-UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME);
-UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID);
-UFS_STRING_DESCRIPTOR(serial_number, _SN);
-UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV);
-
-static struct attribute *ufs_sysfs_string_descriptors[] = {
- &dev_attr_manufacturer_name.attr,
- &dev_attr_product_name.attr,
- &dev_attr_oem_id.attr,
- &dev_attr_serial_number.attr,
- &dev_attr_product_revision.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_string_descriptors_group = {
- .name = "string_descriptors",
- .attrs = ufs_sysfs_string_descriptors,
-};
-
-static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
-{
- return idn >= QUERY_FLAG_IDN_WB_EN &&
- idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
-}
-
-#define UFS_FLAG(_name, _uname) \
-static ssize_t _name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- bool flag; \
- u8 index = 0; \
- int ret; \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- \
- down(&hba->host_sem); \
- if (!ufshcd_is_user_access_allowed(hba)) { \
- up(&hba->host_sem); \
- return -EBUSY; \
- } \
- if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
- index = ufshcd_wb_get_query_index(hba); \
- ufshcd_rpm_get_sync(hba); \
- ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
- QUERY_FLAG_IDN##_uname, index, &flag); \
- ufshcd_rpm_put_sync(hba); \
- if (ret) { \
- ret = -EINVAL; \
- goto out; \
- } \
- ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
-out: \
- up(&hba->host_sem); \
- return ret; \
-} \
-static DEVICE_ATTR_RO(_name)
-
-UFS_FLAG(device_init, _FDEVICEINIT);
-UFS_FLAG(permanent_wpe, _PERMANENT_WPE);
-UFS_FLAG(power_on_wpe, _PWR_ON_WPE);
-UFS_FLAG(bkops_enable, _BKOPS_EN);
-UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
-UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
-UFS_FLAG(busy_rtc, _BUSY_RTC);
-UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
-UFS_FLAG(wb_enable, _WB_EN);
-UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
-UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
-UFS_FLAG(hpb_enable, _HPB_EN);
-
-static struct attribute *ufs_sysfs_device_flags[] = {
- &dev_attr_device_init.attr,
- &dev_attr_permanent_wpe.attr,
- &dev_attr_power_on_wpe.attr,
- &dev_attr_bkops_enable.attr,
- &dev_attr_life_span_mode_enable.attr,
- &dev_attr_phy_resource_removal.attr,
- &dev_attr_busy_rtc.attr,
- &dev_attr_disable_fw_update.attr,
- &dev_attr_wb_enable.attr,
- &dev_attr_wb_flush_en.attr,
- &dev_attr_wb_flush_during_h8.attr,
- &dev_attr_hpb_enable.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_flags_group = {
- .name = "flags",
- .attrs = ufs_sysfs_device_flags,
-};
-
-static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
-{
- return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
- idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
-}
-
-#define UFS_ATTRIBUTE(_name, _uname) \
-static ssize_t _name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct ufs_hba *hba = dev_get_drvdata(dev); \
- u32 value; \
- int ret; \
- u8 index = 0; \
- \
- down(&hba->host_sem); \
- if (!ufshcd_is_user_access_allowed(hba)) { \
- up(&hba->host_sem); \
- return -EBUSY; \
- } \
- if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
- index = ufshcd_wb_get_query_index(hba); \
- ufshcd_rpm_get_sync(hba); \
- ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
- QUERY_ATTR_IDN##_uname, index, 0, &value); \
- ufshcd_rpm_put_sync(hba); \
- if (ret) { \
- ret = -EINVAL; \
- goto out; \
- } \
- ret = sysfs_emit(buf, "0x%08X\n", value); \
-out: \
- up(&hba->host_sem); \
- return ret; \
-} \
-static DEVICE_ATTR_RO(_name)
-
-UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
-UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD);
-UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
-UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
-UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
-UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS);
-UFS_ATTRIBUTE(purge_status, _PURGE_STATUS);
-UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
-UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
-UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
-UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
-UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
-UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
-UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
-UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
-UFS_ATTRIBUTE(psa_state, _PSA_STATE);
-UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
-UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
-UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
-UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
-UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
-
-static struct attribute *ufs_sysfs_attributes[] = {
- &dev_attr_boot_lun_enabled.attr,
- &dev_attr_max_data_size_hpb_single_cmd.attr,
- &dev_attr_current_power_mode.attr,
- &dev_attr_active_icc_level.attr,
- &dev_attr_ooo_data_enabled.attr,
- &dev_attr_bkops_status.attr,
- &dev_attr_purge_status.attr,
- &dev_attr_max_data_in_size.attr,
- &dev_attr_max_data_out_size.attr,
- &dev_attr_reference_clock_frequency.attr,
- &dev_attr_configuration_descriptor_lock.attr,
- &dev_attr_max_number_of_rtt.attr,
- &dev_attr_exception_event_control.attr,
- &dev_attr_exception_event_status.attr,
- &dev_attr_ffu_status.attr,
- &dev_attr_psa_state.attr,
- &dev_attr_psa_data_size.attr,
- &dev_attr_wb_flush_status.attr,
- &dev_attr_wb_avail_buf.attr,
- &dev_attr_wb_life_time_est.attr,
- &dev_attr_wb_cur_buf.attr,
- NULL,
-};
-
-static const struct attribute_group ufs_sysfs_attributes_group = {
- .name = "attributes",
- .attrs = ufs_sysfs_attributes,
-};
-
-static const struct attribute_group *ufs_sysfs_groups[] = {
- &ufs_sysfs_default_group,
- &ufs_sysfs_monitor_group,
- &ufs_sysfs_device_descriptor_group,
- &ufs_sysfs_interconnect_descriptor_group,
- &ufs_sysfs_geometry_descriptor_group,
- &ufs_sysfs_health_descriptor_group,
- &ufs_sysfs_power_descriptor_group,
- &ufs_sysfs_string_descriptors_group,
- &ufs_sysfs_flags_group,
- &ufs_sysfs_attributes_group,
- NULL,
-};
-
-#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size) \
-static ssize_t _pname##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct ufs_hba *hba = shost_priv(sdev->host); \
- u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, \
- _duname##_DESC_PARAM##_puname)) \
- return -EINVAL; \
- return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
- lun, _duname##_DESC_PARAM##_puname, buf, _size); \
-} \
-static DEVICE_ATTR_RO(_pname)
-
-#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \
- UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)
-
-UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
-UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
-UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
-UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
-UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1);
-UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1);
-UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1);
-UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
-UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
-UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
-UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
-UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
-UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
-UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
-UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2);
-UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2);
-UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2);
-UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
-
-static struct attribute *ufs_sysfs_unit_descriptor[] = {
- &dev_attr_lu_enable.attr,
- &dev_attr_boot_lun_id.attr,
- &dev_attr_lun_write_protect.attr,
- &dev_attr_lun_queue_depth.attr,
- &dev_attr_psa_sensitive.attr,
- &dev_attr_lun_memory_type.attr,
- &dev_attr_data_reliability.attr,
- &dev_attr_logical_block_size.attr,
- &dev_attr_logical_block_count.attr,
- &dev_attr_erase_block_size.attr,
- &dev_attr_provisioning_type.attr,
- &dev_attr_physical_memory_resourse_count.attr,
- &dev_attr_context_capabilities.attr,
- &dev_attr_large_unit_granularity.attr,
- &dev_attr_hpb_lu_max_active_regions.attr,
- &dev_attr_hpb_pinned_region_start_offset.attr,
- &dev_attr_hpb_number_pinned_regions.attr,
- &dev_attr_wb_buf_alloc_units.attr,
- NULL,
-};
-
-const struct attribute_group ufs_sysfs_unit_descriptor_group = {
- .name = "unit_descriptor",
- .attrs = ufs_sysfs_unit_descriptor,
-};
-
-static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u32 value;
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba = shost_priv(sdev->host);
- u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
- int ret;
-
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- ret = -EBUSY;
- goto out;
- }
-
- ufshcd_rpm_get_sync(hba);
- ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
- ufshcd_rpm_put_sync(hba);
- if (ret) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = sysfs_emit(buf, "0x%08X\n", value);
-
-out:
- up(&hba->host_sem);
- return ret;
-}
-static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
-
-static struct attribute *ufs_sysfs_lun_attributes[] = {
- &dev_attr_dyn_cap_needed_attribute.attr,
- NULL,
-};
-
-const struct attribute_group ufs_sysfs_lun_attributes_group = {
- .attrs = ufs_sysfs_lun_attributes,
-};
-
-void ufs_sysfs_add_nodes(struct device *dev)
-{
- int ret;
-
- ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
- if (ret)
- dev_err(dev,
- "%s: sysfs groups creation failed (err = %d)\n",
- __func__, ret);
-}
-
-void ufs_sysfs_remove_nodes(struct device *dev)
-{
- sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups);
-}
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h
deleted file mode 100644
index 0f4e750a6748..000000000000
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 Western Digital Corporation
- */
-
-#ifndef __UFS_SYSFS_H__
-#define __UFS_SYSFS_H__
-
-#include <linux/sysfs.h>
-
-#include "ufshcd.h"
-
-void ufs_sysfs_add_nodes(struct device *dev);
-void ufs_sysfs_remove_nodes(struct device *dev);
-
-extern const struct attribute_group ufs_sysfs_unit_descriptor_group;
-extern const struct attribute_group ufs_sysfs_lun_attributes_group;
-#endif
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
deleted file mode 100644
index 0bfdca3e648e..000000000000
--- a/drivers/scsi/ufs/ufs.h
+++ /dev/null
@@ -1,652 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Universal Flash Storage Host controller driver
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#ifndef _UFS_H
-#define _UFS_H
-
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <uapi/scsi/scsi_bsg_ufs.h>
-
-#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
-#define QUERY_DESC_MAX_SIZE 255
-#define QUERY_DESC_MIN_SIZE 2
-#define QUERY_DESC_HDR_SIZE 2
-#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
- (sizeof(struct utp_upiu_header)))
-#define UFS_SENSE_SIZE 18
-
-#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
- (byte1 << 8) | (byte0))
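
UPIU_HEADER_DWORD() packs four header bytes into the big-endian DW-0 of a UPIU, so byte3 lands first in memory. Roughly how a command UPIU header is composed elsewhere in the driver (lun and task_tag are placeholder variables here):

/* Sketch: DW-0 = transaction code, flags, LUN, task tag */
hdr_dword_0 = UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
				UPIU_CMD_FLAGS_READ, lun, task_tag);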
-/*
- * A UFS device may have standard LUs whose LUN ids range from 0x00 to
- * 0x7F. Standard LUs use the "Peripheral Device Addressing Format".
- * A UFS device may also have Well Known LUs (also referred to as W-LUs),
- * whose ids again range from 0x00 to 0x7F. For W-LUs the device only
- * uses the "Extended Addressing Format", which means the W-LUNs start
- * from 0xc100 (SCSI_W_LUN_BASE).
- * Hence the max. LUN number reported by a UFS device can be 0xC17F.
- */
-#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
-#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
-#define UFS_UPIU_WLUN_ID (1 << 7)
-#define UFS_RPMB_UNIT 0xC4
-
-/* WriteBooster buffer is available only for logical units 0 to 7 */
-#define UFS_UPIU_MAX_WB_LUN_ID 8
-
-/* Well known logical unit id in LUN field of UPIU */
-enum {
- UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
- UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
- UFS_UPIU_BOOT_WLUN = 0xB0,
- UFS_UPIU_RPMB_WLUN = 0xC4,
-};
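
Because W-LUs use the extended addressing format, the UPIU W-LUN ids above land in the SCSI LUN space by clearing the WLUN bit and OR-ing in SCSI_W_LUN_BASE; a sketch of the conversion (mirroring the helper the driver uses):

/* Sketch: UPIU W-LUN id -> SCSI W-LUN, e.g. 0xD0 -> 0xC150 */
static inline u16 upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}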
-
-/*
- * UFS Protocol Information Unit related definitions
- */
-
-/* Task management functions */
-enum {
- UFS_ABORT_TASK = 0x01,
- UFS_ABORT_TASK_SET = 0x02,
- UFS_CLEAR_TASK_SET = 0x04,
- UFS_LOGICAL_RESET = 0x08,
- UFS_QUERY_TASK = 0x80,
- UFS_QUERY_TASK_SET = 0x81,
-};
-
-/* UTP UPIU Transaction Codes Initiator to Target */
-enum {
- UPIU_TRANSACTION_NOP_OUT = 0x00,
- UPIU_TRANSACTION_COMMAND = 0x01,
- UPIU_TRANSACTION_DATA_OUT = 0x02,
- UPIU_TRANSACTION_TASK_REQ = 0x04,
- UPIU_TRANSACTION_QUERY_REQ = 0x16,
-};
-
-/* UTP UPIU Transaction Codes Target to Initiator */
-enum {
- UPIU_TRANSACTION_NOP_IN = 0x20,
- UPIU_TRANSACTION_RESPONSE = 0x21,
- UPIU_TRANSACTION_DATA_IN = 0x22,
- UPIU_TRANSACTION_TASK_RSP = 0x24,
- UPIU_TRANSACTION_READY_XFER = 0x31,
- UPIU_TRANSACTION_QUERY_RSP = 0x36,
- UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
-};
-
-/* UPIU Read/Write flags */
-enum {
- UPIU_CMD_FLAGS_NONE = 0x00,
- UPIU_CMD_FLAGS_WRITE = 0x20,
- UPIU_CMD_FLAGS_READ = 0x40,
-};
-
-/* UPIU Task Attributes */
-enum {
- UPIU_TASK_ATTR_SIMPLE = 0x00,
- UPIU_TASK_ATTR_ORDERED = 0x01,
- UPIU_TASK_ATTR_HEADQ = 0x02,
- UPIU_TASK_ATTR_ACA = 0x03,
-};
-
-/* UPIU Query request function */
-enum {
- UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
- UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
-};
-
-/* Flag idn for Query Requests */
-enum flag_idn {
- QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
- QUERY_FLAG_IDN_PERMANENT_WPE = 0x02,
- QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
- QUERY_FLAG_IDN_BKOPS_EN = 0x04,
- QUERY_FLAG_IDN_LIFE_SPAN_MODE_ENABLE = 0x05,
- QUERY_FLAG_IDN_PURGE_ENABLE = 0x06,
- QUERY_FLAG_IDN_RESERVED2 = 0x07,
- QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL = 0x08,
- QUERY_FLAG_IDN_BUSY_RTC = 0x09,
- QUERY_FLAG_IDN_RESERVED3 = 0x0A,
- QUERY_FLAG_IDN_PERMANENTLY_DISABLE_FW_UPDATE = 0x0B,
- QUERY_FLAG_IDN_WB_EN = 0x0E,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
- QUERY_FLAG_IDN_HPB_RESET = 0x11,
- QUERY_FLAG_IDN_HPB_EN = 0x12,
-};
-
-/* Attribute idn for Query requests */
-enum attr_idn {
- QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
- QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
- QUERY_ATTR_IDN_POWER_MODE = 0x02,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
- QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
- QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
- QUERY_ATTR_IDN_PURGE_STATUS = 0x06,
- QUERY_ATTR_IDN_MAX_DATA_IN = 0x07,
- QUERY_ATTR_IDN_MAX_DATA_OUT = 0x08,
- QUERY_ATTR_IDN_DYN_CAP_NEEDED = 0x09,
- QUERY_ATTR_IDN_REF_CLK_FREQ = 0x0A,
- QUERY_ATTR_IDN_CONF_DESC_LOCK = 0x0B,
- QUERY_ATTR_IDN_MAX_NUM_OF_RTT = 0x0C,
- QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
- QUERY_ATTR_IDN_EE_STATUS = 0x0E,
- QUERY_ATTR_IDN_SECONDS_PASSED = 0x0F,
- QUERY_ATTR_IDN_CNTX_CONF = 0x10,
- QUERY_ATTR_IDN_CORR_PRG_BLK_NUM = 0x11,
- QUERY_ATTR_IDN_RESERVED2 = 0x12,
- QUERY_ATTR_IDN_RESERVED3 = 0x13,
- QUERY_ATTR_IDN_FFU_STATUS = 0x14,
- QUERY_ATTR_IDN_PSA_STATE = 0x15,
- QUERY_ATTR_IDN_PSA_DATA_SIZE = 0x16,
- QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME = 0x17,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP = 0x18,
- QUERY_ATTR_IDN_HIGH_TEMP_BOUND = 0x19,
- QUERY_ATTR_IDN_LOW_TEMP_BOUND = 0x1A,
- QUERY_ATTR_IDN_WB_FLUSH_STATUS = 0x1C,
- QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D,
- QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E,
- QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
-};
-
-/* Descriptor idn for Query requests */
-enum desc_idn {
- QUERY_DESC_IDN_DEVICE = 0x0,
- QUERY_DESC_IDN_CONFIGURATION = 0x1,
- QUERY_DESC_IDN_UNIT = 0x2,
- QUERY_DESC_IDN_RFU_0 = 0x3,
- QUERY_DESC_IDN_INTERCONNECT = 0x4,
- QUERY_DESC_IDN_STRING = 0x5,
- QUERY_DESC_IDN_RFU_1 = 0x6,
- QUERY_DESC_IDN_GEOMETRY = 0x7,
- QUERY_DESC_IDN_POWER = 0x8,
- QUERY_DESC_IDN_HEALTH = 0x9,
- QUERY_DESC_IDN_MAX,
-};
-
-enum desc_header_offset {
- QUERY_DESC_LENGTH_OFFSET = 0x00,
- QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
-};
-
-/* Unit descriptor parameters offsets in bytes */
-enum unit_desc_param {
- UNIT_DESC_PARAM_LEN = 0x0,
- UNIT_DESC_PARAM_TYPE = 0x1,
- UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
- UNIT_DESC_PARAM_LU_ENABLE = 0x3,
- UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
- UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
- UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
- UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7,
- UNIT_DESC_PARAM_MEM_TYPE = 0x8,
- UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
- UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
- UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
- UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
- UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
- UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
- UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
- UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
- UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23,
- UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25,
- UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27,
- UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29,
-};
-
-/* Device descriptor parameters offsets in bytes */
-enum device_desc_param {
- DEVICE_DESC_PARAM_LEN = 0x0,
- DEVICE_DESC_PARAM_TYPE = 0x1,
- DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
- DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
- DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
- DEVICE_DESC_PARAM_PRTCL = 0x5,
- DEVICE_DESC_PARAM_NUM_LU = 0x6,
- DEVICE_DESC_PARAM_NUM_WLU = 0x7,
- DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
- DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
- DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
- DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
- DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
- DEVICE_DESC_PARAM_SEC_LU = 0xD,
- DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
- DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
- DEVICE_DESC_PARAM_SPEC_VER = 0x10,
- DEVICE_DESC_PARAM_MANF_DATE = 0x12,
- DEVICE_DESC_PARAM_MANF_NAME = 0x14,
- DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
- DEVICE_DESC_PARAM_SN = 0x16,
- DEVICE_DESC_PARAM_OEM_ID = 0x17,
- DEVICE_DESC_PARAM_MANF_ID = 0x18,
- DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
- DEVICE_DESC_PARAM_UD_LEN = 0x1B,
- DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
- DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
- DEVICE_DESC_PARAM_UFS_FEAT = 0x1F,
- DEVICE_DESC_PARAM_FFU_TMT = 0x20,
- DEVICE_DESC_PARAM_Q_DPTH = 0x21,
- DEVICE_DESC_PARAM_DEV_VER = 0x22,
- DEVICE_DESC_PARAM_NUM_SEC_WPA = 0x24,
- DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25,
- DEVICE_DESC_PARAM_PSA_TMT = 0x29,
- DEVICE_DESC_PARAM_PRDCT_REV = 0x2A,
- DEVICE_DESC_PARAM_HPB_VER = 0x40,
- DEVICE_DESC_PARAM_HPB_CONTROL = 0x42,
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F,
- DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53,
- DEVICE_DESC_PARAM_WB_TYPE = 0x54,
- DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS = 0x55,
-};
-
-/* Interconnect descriptor parameters offsets in bytes */
-enum interconnect_desc_param {
- INTERCONNECT_DESC_PARAM_LEN = 0x0,
- INTERCONNECT_DESC_PARAM_TYPE = 0x1,
- INTERCONNECT_DESC_PARAM_UNIPRO_VER = 0x2,
- INTERCONNECT_DESC_PARAM_MPHY_VER = 0x4,
-};
-
-/* Geometry descriptor parameters offsets in bytes */
-enum geometry_desc_param {
- GEOMETRY_DESC_PARAM_LEN = 0x0,
- GEOMETRY_DESC_PARAM_TYPE = 0x1,
- GEOMETRY_DESC_PARAM_DEV_CAP = 0x4,
- GEOMETRY_DESC_PARAM_MAX_NUM_LUN = 0xC,
- GEOMETRY_DESC_PARAM_SEG_SIZE = 0xD,
- GEOMETRY_DESC_PARAM_ALLOC_UNIT_SIZE = 0x11,
- GEOMETRY_DESC_PARAM_MIN_BLK_SIZE = 0x12,
- GEOMETRY_DESC_PARAM_OPT_RD_BLK_SIZE = 0x13,
- GEOMETRY_DESC_PARAM_OPT_WR_BLK_SIZE = 0x14,
- GEOMETRY_DESC_PARAM_MAX_IN_BUF_SIZE = 0x15,
- GEOMETRY_DESC_PARAM_MAX_OUT_BUF_SIZE = 0x16,
- GEOMETRY_DESC_PARAM_RPMB_RW_SIZE = 0x17,
- GEOMETRY_DESC_PARAM_DYN_CAP_RSRC_PLC = 0x18,
- GEOMETRY_DESC_PARAM_DATA_ORDER = 0x19,
- GEOMETRY_DESC_PARAM_MAX_NUM_CTX = 0x1A,
- GEOMETRY_DESC_PARAM_TAG_UNIT_SIZE = 0x1B,
- GEOMETRY_DESC_PARAM_TAG_RSRC_SIZE = 0x1C,
- GEOMETRY_DESC_PARAM_SEC_RM_TYPES = 0x1D,
- GEOMETRY_DESC_PARAM_MEM_TYPES = 0x1E,
- GEOMETRY_DESC_PARAM_SCM_MAX_NUM_UNITS = 0x20,
- GEOMETRY_DESC_PARAM_SCM_CAP_ADJ_FCTR = 0x24,
- GEOMETRY_DESC_PARAM_NPM_MAX_NUM_UNITS = 0x26,
- GEOMETRY_DESC_PARAM_NPM_CAP_ADJ_FCTR = 0x2A,
- GEOMETRY_DESC_PARAM_ENM1_MAX_NUM_UNITS = 0x2C,
- GEOMETRY_DESC_PARAM_ENM1_CAP_ADJ_FCTR = 0x30,
- GEOMETRY_DESC_PARAM_ENM2_MAX_NUM_UNITS = 0x32,
- GEOMETRY_DESC_PARAM_ENM2_CAP_ADJ_FCTR = 0x36,
- GEOMETRY_DESC_PARAM_ENM3_MAX_NUM_UNITS = 0x38,
- GEOMETRY_DESC_PARAM_ENM3_CAP_ADJ_FCTR = 0x3C,
- GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E,
- GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42,
- GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44,
- GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48,
- GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49,
- GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A,
- GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B,
- GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F,
- GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53,
- GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54,
- GEOMETRY_DESC_PARAM_WB_SUP_RED_TYPE = 0x55,
- GEOMETRY_DESC_PARAM_WB_SUP_WB_TYPE = 0x56,
-};
-
-/* Health descriptor parameters offsets in bytes */
-enum health_desc_param {
- HEALTH_DESC_PARAM_LEN = 0x0,
- HEALTH_DESC_PARAM_TYPE = 0x1,
- HEALTH_DESC_PARAM_EOL_INFO = 0x2,
- HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3,
- HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
-};
-
-/* WriteBooster buffer mode */
-enum {
- WB_BUF_MODE_LU_DEDICATED = 0x0,
- WB_BUF_MODE_SHARED = 0x1,
-};
-
-/*
- * Logical Unit Write Protect
- * 00h: LU not write protected
- * 01h: LU write protected when fPowerOnWPEn =1
- * 02h: LU permanently write protected when fPermanentWPEn =1
- */
-enum ufs_lu_wp_type {
- UFS_LU_NO_WP = 0x00,
- UFS_LU_POWER_ON_WP = 0x01,
- UFS_LU_PERM_WP = 0x02,
-};
-
-/* bActiveICCLevel parameter current units */
-enum {
- UFSHCD_NANO_AMP = 0,
- UFSHCD_MICRO_AMP = 1,
- UFSHCD_MILI_AMP = 2,
- UFSHCD_AMP = 3,
-};
-
-/* Possible values for dExtendedUFSFeaturesSupport */
-enum {
- UFS_DEV_LOW_TEMP_NOTIF = BIT(4),
- UFS_DEV_HIGH_TEMP_NOTIF = BIT(5),
- UFS_DEV_EXT_TEMP_NOTIF = BIT(6),
- UFS_DEV_HPB_SUPPORT = BIT(7),
- UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
-};
-#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
-
-#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
-
-/* Attribute bActiveICCLevel parameter bit masks definitions */
-#define ATTR_ICC_LVL_UNIT_OFFSET 14
-#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
-#define ATTR_ICC_LVL_VALUE_MASK 0x3FF
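
Each 16-bit bActiveICCLevel power descriptor entry therefore carries a 2-bit unit code and a 10-bit magnitude; decoding one entry is, roughly:

/* Sketch: split a bActiveICCLevel entry into unit code and magnitude */
static void decode_icc_entry(u16 entry)
{
	unsigned int unit = (entry & ATTR_ICC_LVL_UNIT_MASK) >>
			    ATTR_ICC_LVL_UNIT_OFFSET;	/* UFSHCD_*_AMP code */
	unsigned int value = entry & ATTR_ICC_LVL_VALUE_MASK;

	pr_info("ICC level: magnitude %u, unit code %u\n", value, unit);
}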
-
-/* Power descriptor parameters offsets in bytes */
-enum power_desc_param_offset {
- PWR_DESC_LEN = 0x0,
- PWR_DESC_TYPE = 0x1,
- PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2,
- PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22,
- PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42,
-};
-
-/* Exception event mask values */
-enum {
- MASK_EE_STATUS = 0xFFFF,
- MASK_EE_DYNCAP_EVENT = BIT(0),
- MASK_EE_SYSPOOL_EVENT = BIT(1),
- MASK_EE_URGENT_BKOPS = BIT(2),
- MASK_EE_TOO_HIGH_TEMP = BIT(3),
- MASK_EE_TOO_LOW_TEMP = BIT(4),
- MASK_EE_WRITEBOOSTER_EVENT = BIT(5),
- MASK_EE_PERFORMANCE_THROTTLING = BIT(6),
-};
-#define MASK_EE_URGENT_TEMP (MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP)
-
-/* Background operation status */
-enum bkops_status {
- BKOPS_STATUS_NO_OP = 0x0,
- BKOPS_STATUS_NON_CRITICAL = 0x1,
- BKOPS_STATUS_PERF_IMPACT = 0x2,
- BKOPS_STATUS_CRITICAL = 0x3,
- BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL,
-};
-
-/* UTP QUERY Transaction Specific Fields OpCode */
-enum query_opcode {
- UPIU_QUERY_OPCODE_NOP = 0x0,
- UPIU_QUERY_OPCODE_READ_DESC = 0x1,
- UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
- UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
- UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
- UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
- UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
- UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
- UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
-};
-
-/* bRefClkFreq attribute values */
-enum ufs_ref_clk_freq {
- REF_CLK_FREQ_19_2_MHZ = 0,
- REF_CLK_FREQ_26_MHZ = 1,
- REF_CLK_FREQ_38_4_MHZ = 2,
- REF_CLK_FREQ_52_MHZ = 3,
- REF_CLK_FREQ_INVAL = -1,
-};
-
-struct ufs_ref_clk {
- unsigned long freq_hz;
- enum ufs_ref_clk_freq val;
-};
-
-/* Query response result code */
-enum {
- QUERY_RESULT_SUCCESS = 0x00,
- QUERY_RESULT_NOT_READABLE = 0xF6,
- QUERY_RESULT_NOT_WRITEABLE = 0xF7,
- QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
- QUERY_RESULT_INVALID_LENGTH = 0xF9,
- QUERY_RESULT_INVALID_VALUE = 0xFA,
- QUERY_RESULT_INVALID_SELECTOR = 0xFB,
- QUERY_RESULT_INVALID_INDEX = 0xFC,
- QUERY_RESULT_INVALID_IDN = 0xFD,
- QUERY_RESULT_INVALID_OPCODE = 0xFE,
- QUERY_RESULT_GENERAL_FAILURE = 0xFF,
-};
-
-/* UTP Transfer Request Command Type (CT) */
-enum {
- UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
- UPIU_COMMAND_SET_TYPE_UFS = 0x1,
- UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
-};
-
-/* UTP Transfer Request Command Offset */
-#define UPIU_COMMAND_TYPE_OFFSET 28
-
-/* Offset of the response code in the UPIU header */
-#define UPIU_RSP_CODE_OFFSET 8
-
-enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
- MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_EXCEPTION_EVENT = 0x10000,
- MASK_TM_SERVICE_RESP = 0xFF,
- MASK_TM_FUNC = 0xFF,
-};
-
-/* Task management service response */
-enum {
- UPIU_TASK_MANAGEMENT_FUNC_COMPL = 0x00,
- UPIU_TASK_MANAGEMENT_FUNC_NOT_SUPPORTED = 0x04,
- UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED = 0x08,
- UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
- UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
-};
-
-/* UFS device power modes */
-enum ufs_dev_pwr_mode {
- UFS_ACTIVE_PWR_MODE = 1,
- UFS_SLEEP_PWR_MODE = 2,
- UFS_POWERDOWN_PWR_MODE = 3,
- UFS_DEEPSLEEP_PWR_MODE = 4,
-};
-
-#define UFS_WB_BUF_REMAIN_PERCENT(val) ((val) / 10)
-
-/**
- * struct utp_cmd_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @sense_data: Sense data field DW-8 to DW-12
- */
-struct utp_cmd_rsp {
- __be32 residual_transfer_count;
- __be32 reserved[4];
- __be16 sense_data_len;
- u8 sense_data[UFS_SENSE_SIZE];
-};
-
-struct ufshpb_active_field {
- __be16 active_rgn;
- __be16 active_srgn;
-};
-#define HPB_ACT_FIELD_SIZE 4
-
-/**
- * struct utp_hpb_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved1: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @desc_type: Descriptor type of sense data
- * @additional_len: Additional length of sense data
- * @hpb_op: HPB operation type
- * @lun: LUN of response UPIU
- * @active_rgn_cnt: Active region count
- * @inactive_rgn_cnt: Inactive region count
- * @hpb_active_field: HPB regions and subregions recommended for activation
- * @hpb_inactive_field: HPB regions and subregions to be inactivated
- */
-struct utp_hpb_rsp {
- __be32 residual_transfer_count;
- __be32 reserved1[4];
- __be16 sense_data_len;
- u8 desc_type;
- u8 additional_len;
- u8 hpb_op;
- u8 lun;
- u8 active_rgn_cnt;
- u8 inactive_rgn_cnt;
- struct ufshpb_active_field hpb_active_field[2];
- __be16 hpb_inactive_field[2];
-};
-#define UTP_HPB_RSP_SIZE 40
-
-/**
- * struct utp_upiu_rsp - general upiu response structure
- * @header: UPIU header structure DW-0 to DW-2
- * @sr: fields structure for scsi command DW-3 to DW-12
- * @qr: fields structure for query request DW-3 to DW-7
- */
-struct utp_upiu_rsp {
- struct utp_upiu_header header;
- union {
- struct utp_cmd_rsp sr;
- struct utp_hpb_rsp hr;
- struct utp_upiu_query qr;
- };
-};
-
-/**
- * struct ufs_query_req - parameters for building a query request
- * @query_func: UPIU header query function
- * @upiu_req: the query request data
- */
-struct ufs_query_req {
- u8 query_func;
- struct utp_upiu_query upiu_req;
-};
-
-/**
- * struct ufs_query_res - UPIU QUERY response
- * @response: device response code
- * @upiu_res: query response data
- */
-struct ufs_query_res {
- u8 response;
- struct utp_upiu_query upiu_res;
-};
-
-#define UFS_VREG_VCC_MIN_UV 2700000 /* uV */
-#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
-#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
-#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
-#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */
-#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */
-#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */
-#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
-
-/*
- * VCCQ & VCCQ2 current requirement when UFS device is in sleep state
- * and link is in Hibern8 state.
- */
-#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */
-
-struct ufs_vreg {
- struct regulator *reg;
- const char *name;
- bool always_on;
- bool enabled;
- int min_uV;
- int max_uV;
- int max_uA;
-};
-
-struct ufs_vreg_info {
- struct ufs_vreg *vcc;
- struct ufs_vreg *vccq;
- struct ufs_vreg *vccq2;
- struct ufs_vreg *vdd_hba;
-};
-
-struct ufs_dev_info {
- bool f_power_on_wp_en;
-	/* Records whether any of the LUs is power-on write protected */
- bool is_lu_power_on_wp;
- /* Maximum number of general LU supported by the UFS device */
- u8 max_lu_supported;
- u16 wmanufacturerid;
-	/* UFS device Product Name */
- u8 *model;
- u16 wspecversion;
- u32 clk_gating_wait_us;
-
- /* UFS HPB related flag */
- bool hpb_enabled;
-
- /* UFS WB related flags */
- bool wb_enabled;
- bool wb_buf_flush_enabled;
- u8 wb_dedicated_lu;
- u8 wb_buffer_type;
-
- bool b_rpm_dev_flush_capable;
- u8 b_presrv_uspc_en;
-};
-
-/*
- * This enum is used in string mapping in include/trace/events/ufs.h.
- */
-enum ufs_trace_str_t {
- UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
- UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
- UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
-};
-
-/*
- * Transaction Specific Fields (TSF) type in the UPIU package; this enum is
- * used in include/trace/events/ufs.h for UFS command tracing.
- */
-enum ufs_trace_tsf_t {
- UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
-};
-
-/**
- * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
- * @dev_info: pointer to an instance of struct ufs_dev_info
- * @lun: LU number to check
- * @param_offset: offset of the parameter within the unit descriptor
- * @return: true if the lun has a matching unit descriptor, false otherwise
- */
-static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
- u8 lun, u8 param_offset)
-{
- if (!dev_info || !dev_info->max_lu_supported) {
- pr_err("Max General LU supported by UFS isn't initialized\n");
- return false;
- }
-	/* WB is available only for logical units 0 to 7 */
- if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS)
- return lun < UFS_UPIU_MAX_WB_LUN_ID;
- return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
-}
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
deleted file mode 100644
index 39bf204c6ec3..000000000000
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bsg endpoint that supports UPIUs
- *
- * Copyright (C) 2018 Western Digital Corporation
- */
-#include "ufs_bsg.h"
-
-static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
- struct utp_upiu_query *qr)
-{
- int desc_size = be16_to_cpu(qr->length);
- int desc_id = qr->idn;
-
- if (desc_size <= 0)
- return -EINVAL;
-
- ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
- if (!*desc_len)
- return -EINVAL;
-
- *desc_len = min_t(int, *desc_len, desc_size);
-
- return 0;
-}
-
-static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
- unsigned int request_len,
- unsigned int reply_len)
-{
- int min_req_len = sizeof(struct ufs_bsg_request);
- int min_rsp_len = sizeof(struct ufs_bsg_reply);
-
- if (min_req_len > request_len || min_rsp_len > reply_len) {
- dev_err(hba->dev, "not enough space assigned\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
- uint8_t **desc_buff, int *desc_len,
- enum query_opcode desc_op)
-{
- struct ufs_bsg_request *bsg_request = job->request;
- struct utp_upiu_query *qr;
- u8 *descp;
-
- if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
- desc_op != UPIU_QUERY_OPCODE_READ_DESC)
- goto out;
-
- qr = &bsg_request->upiu_req.qr;
- if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
- dev_err(hba->dev, "Illegal desc size\n");
- return -EINVAL;
- }
-
- if (*desc_len > job->request_payload.payload_len) {
- dev_err(hba->dev, "Illegal desc size\n");
- return -EINVAL;
- }
-
- descp = kzalloc(*desc_len, GFP_KERNEL);
- if (!descp)
- return -ENOMEM;
-
- if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
- sg_copy_to_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt, descp,
- *desc_len);
-
- *desc_buff = descp;
-
-out:
- return 0;
-}
-
-static int ufs_bsg_request(struct bsg_job *job)
-{
- struct ufs_bsg_request *bsg_request = job->request;
- struct ufs_bsg_reply *bsg_reply = job->reply;
- struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
- unsigned int req_len = job->request_len;
- unsigned int reply_len = job->reply_len;
- struct uic_command uc = {};
- int msgcode;
- uint8_t *desc_buff = NULL;
- int desc_len = 0;
- enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
- int ret;
-
- ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
- if (ret)
- goto out;
-
- bsg_reply->reply_payload_rcv_len = 0;
-
- ufshcd_rpm_get_sync(hba);
-
- msgcode = bsg_request->msgcode;
- switch (msgcode) {
- case UPIU_TRANSACTION_QUERY_REQ:
- desc_op = bsg_request->upiu_req.qr.opcode;
- ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
- &desc_len, desc_op);
- if (ret) {
- ufshcd_rpm_put_sync(hba);
- goto out;
- }
-
- fallthrough;
- case UPIU_TRANSACTION_NOP_OUT:
- case UPIU_TRANSACTION_TASK_REQ:
- ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
- &bsg_reply->upiu_rsp, msgcode,
- desc_buff, &desc_len, desc_op);
- if (ret)
- dev_err(hba->dev,
- "exe raw upiu: error code %d\n", ret);
-
- break;
- case UPIU_TRANSACTION_UIC_CMD:
- memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
- ret = ufshcd_send_uic_cmd(hba, &uc);
- if (ret)
- dev_err(hba->dev,
- "send uic cmd: error code %d\n", ret);
-
- memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
-
- break;
- default:
- ret = -ENOTSUPP;
- dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
-
- break;
- }
-
- ufshcd_rpm_put_sync(hba);
-
- if (!desc_buff)
- goto out;
-
- if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
- bsg_reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- desc_buff, desc_len);
-
- kfree(desc_buff);
-
-out:
- bsg_reply->result = ret;
- job->reply_len = sizeof(struct ufs_bsg_reply);
- /* complete the job here only if no error */
- if (ret == 0)
- bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
-
- return ret;
-}
-
-/**
- * ufs_bsg_remove - detach and remove the added ufs-bsg node
- * @hba: per adapter object
- *
- * Should be called when unloading the driver.
- */
-void ufs_bsg_remove(struct ufs_hba *hba)
-{
- struct device *bsg_dev = &hba->bsg_dev;
-
- if (!hba->bsg_queue)
- return;
-
- bsg_remove_queue(hba->bsg_queue);
-
- device_del(bsg_dev);
- put_device(bsg_dev);
-}
-
-static inline void ufs_bsg_node_release(struct device *dev)
-{
- put_device(dev->parent);
-}
-
-/**
- * ufs_bsg_probe - Add ufs bsg device node
- * @hba: per adapter object
- *
- * Called during initial loading of the driver, and before scsi_scan_host.
- */
-int ufs_bsg_probe(struct ufs_hba *hba)
-{
- struct device *bsg_dev = &hba->bsg_dev;
- struct Scsi_Host *shost = hba->host;
- struct device *parent = &shost->shost_gendev;
- struct request_queue *q;
- int ret;
-
- device_initialize(bsg_dev);
-
- bsg_dev->parent = get_device(parent);
- bsg_dev->release = ufs_bsg_node_release;
-
- dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
-
- ret = device_add(bsg_dev);
- if (ret)
- goto out;
-
- q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
- if (IS_ERR(q)) {
- ret = PTR_ERR(q);
- goto out;
- }
-
- hba->bsg_queue = q;
-
- return 0;
-
-out:
- dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no);
- put_device(bsg_dev);
- return ret;
-}
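
From user space, the ufs-bsg node is driven through the generic bsg SG_IO v4 interface. The sketch below sends a NOP OUT UPIU; it is illustrative only — the msgcode value (0x00, per the UPIU transaction codes in ufs.h) and the overall setup are assumptions based on the generic bsg interface, and a real caller must also fill the UPIU header in upiu_req as the transaction requires:

/* Hypothetical user-space sketch: issue a NOP OUT through a ufs-bsg node */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_ufs.h>

int ufs_bsg_nop(const char *node)	/* e.g. "/dev/ufs-bsg0" */
{
	struct ufs_bsg_request req = { .msgcode = 0x00 /* NOP OUT */ };
	struct ufs_bsg_reply rsp = { 0 };
	struct sg_io_v4 io = {
		.guard		= 'Q',
		.protocol	= BSG_PROTOCOL_SCSI,
		.subprotocol	= BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
		.request	= (uint64_t)(uintptr_t)&req,
		.request_len	= sizeof(req),
		.response	= (uint64_t)(uintptr_t)&rsp,
		.max_response_len = sizeof(rsp),
	};
	int fd = open(node, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, SG_IO, &io);
	close(fd);
	return ret < 0 ? ret : rsp.result;
}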
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/scsi/ufs/ufs_bsg.h
deleted file mode 100644
index d09918758631..000000000000
--- a/drivers/scsi/ufs/ufs_bsg.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2018 Western Digital Corporation
- */
-#ifndef UFS_BSG_H
-#define UFS_BSG_H
-
-#include <linux/bsg-lib.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-
-#include "ufshcd.h"
-#include "ufs.h"
-
-#ifdef CONFIG_SCSI_UFS_BSG
-void ufs_bsg_remove(struct ufs_hba *hba);
-int ufs_bsg_probe(struct ufs_hba *hba);
-#else
-static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
-static inline int ufs_bsg_probe(struct ufs_hba *hba) {return 0; }
-#endif
-
-#endif /* UFS_BSG_H */
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
deleted file mode 100644
index 35ec9ea79869..000000000000
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _UFS_QUIRKS_H_
-#define _UFS_QUIRKS_H_
-
-/* return true if s1 is a prefix of s2 */
-#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
-
-#define UFS_ANY_VENDOR 0xFFFF
-#define UFS_ANY_MODEL "ANY_MODEL"
-
-#define UFS_VENDOR_MICRON 0x12C
-#define UFS_VENDOR_SAMSUNG 0x1CE
-#define UFS_VENDOR_SKHYNIX 0x1AD
-#define UFS_VENDOR_TOSHIBA 0x198
-#define UFS_VENDOR_WDC 0x145
-
-/**
- * ufs_dev_fix - ufs device quirk info
- * @wmanufacturerid: UFS device manufacturer id
- * @model: UFS device model name
- * @quirk: device quirk
- */
-struct ufs_dev_fix {
- u16 wmanufacturerid;
- u8 *model;
- unsigned int quirk;
-};
-
-#define END_FIX { }
-
-/* add specific device quirk */
-#define UFS_FIX(_vendor, _model, _quirk) { \
- .wmanufacturerid = (_vendor),\
- .model = (_model), \
- .quirk = (_quirk), \
-}
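
UFS_FIX() entries are normally collected into an END_FIX-terminated table that the core walks at probe time, matching wmanufacturerid and model against the attached device. A representative (hypothetical) table:

/* Sketch: a device quirk table as consumed during UFS device probe */
static struct ufs_dev_fix ufs_fixups[] = {
	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	END_FIX
};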
-
-/*
- * Some vendors' UFS devices send back-to-back NACs for the DL data frames,
- * causing the host controller to raise the DFES error status. Sometimes
- * such UFS devices send back-to-back NACs without waiting for a new
- * retransmitted DL frame from the host, and in such cases the host UniPro
- * may go into a bad state without raising the DFES error
- * interrupt. If this happens then all the pending commands would time out
- * only after the respective SW command timeout (which is generally quite
- * large).
- *
- * We can work around such device behaviour like this:
- * - As soon as SW sees the DL NAC error, it should schedule the error handler.
- * - The error handler sleeps for 50ms to see if there are any fatal errors
- *   raised by the UFS controller.
- * - If there are fatal errors then SW does normal error recovery.
- * - If there are no fatal errors then SW sends the NOP command to the device
- *   to check if the link is alive.
- * - If the NOP command times out, SW does normal error recovery.
- * - If the NOP command succeeds, skip the error handling.
- *
- * If the DL NAC error is seen multiple times with some vendor's UFS devices,
- * enable this quirk to initiate quick error recovery and also silence related
- * error logs to reduce spamming of kernel logs.
- */
-#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
-
-/*
- * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY
- * as 600us, which may not be enough for a reliable hibern8 exit hardware
- * sequence from the UFS device.
- * To work around this issue, the host should set its PA_TACTIVATE time to
- * 1ms even if the device advertises an RX_MIN_ACTIVATETIME_CAPABILITY of
- * less than 1ms.
- */
-#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
-
-/*
- * It seems some UFS devices may keep drawing more than the sleep current
- * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
- * To avoid this situation, add a 2ms delay before putting these UFS
- * rails into LPM mode.
- */
-#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
-
-/*
- * Some UFS devices require host PA_TACTIVATE to be lower than device
- * PA_TACTIVATE; enabling this quirk ensures this.
- */
-#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 7)
-
-/*
- * The maximum value of PA_SaveConfigTime is 250 (10us), but this is not
- * enough for some vendors. A gear switch from PWM to HS may fail even with
- * this maximum PA_SaveConfigTime. A gear switch can be issued by the host
- * controller as error recovery, and a software delay will not help in this
- * case, so we need to increase PA_SaveConfigTime to >32us as per the vendor
- * recommendation.
- */
-#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8)
-
-/*
- * Some UFS devices require VS_DebugSaveConfigTime to be 0x10;
- * enabling this quirk ensures this.
- */
-#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9)
-
-/*
- * Some pre-3.1 UFS devices can support extended features by upgrading
- * the firmware. Enable this quirk to make the UFS core driver probe and
- * enable supported features on such devices.
- */
-#define UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES (1 << 10)
-
-/*
- * Some UFS devices require a delay after the VCC power rail is turned off.
- * Enable this quirk to introduce a 5ms delay after VCC power-off during
- * the suspend flow.
- */
-#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
-
-/*
- * Some UFS devices require the L2P entry to be swapped before it is sent
- * to the UFS device in an HPB READ command.
- */
-#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
-
-#endif /* UFS_QUIRKS_H_ */
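For reference, a sketch of how a table built from UFS_FIX() entries is typically walked at device probe time. The matching rules (UFS_ANY_VENDOR, UFS_ANY_MODEL, and the STR_PRFX_EQUAL() prefix match) come from the header above; the hba->dev_quirks field and the identity parameters are assumptions of this sketch:

static void apply_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_fix *table,
			     u16 wmanufacturerid, const char *model)
{
	const struct ufs_dev_fix *f;

	for (f = table; f->quirk; f++) {	/* END_FIX terminates the table */
		if ((f->wmanufacturerid == wmanufacturerid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		    model &&
		    (STR_PRFX_EQUAL(f->model, model) ||
		     !strcmp(f->model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;	/* accumulate all matches */
	}
}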
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
deleted file mode 100644
index 67402baf6fae..000000000000
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright 2019 Google LLC
- */
-
-#include "ufshcd.h"
-#include "ufshcd-crypto.h"
-
-/* Blk-crypto modes supported by UFS crypto */
-static const struct ufs_crypto_alg_entry {
- enum ufs_crypto_alg ufs_alg;
- enum ufs_crypto_key_size ufs_key_size;
-} ufs_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = {
- [BLK_ENCRYPTION_MODE_AES_256_XTS] = {
- .ufs_alg = UFS_CRYPTO_ALG_AES_XTS,
- .ufs_key_size = UFS_CRYPTO_KEY_SIZE_256,
- },
-};
-
-static int ufshcd_program_key(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot)
-{
- int i;
- u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
- int err = 0;
-
- ufshcd_hold(hba, false);
-
- if (hba->vops && hba->vops->program_key) {
- err = hba->vops->program_key(hba, cfg, slot);
- goto out;
- }
-
- /* Ensure that CFGE is cleared before programming the key */
- ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
- for (i = 0; i < 16; i++) {
- ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[i]),
- slot_offset + i * sizeof(cfg->reg_val[0]));
- }
- /* Write dword 17 */
- ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[17]),
- slot_offset + 17 * sizeof(cfg->reg_val[0]));
- /* Dword 16 must be written last */
- ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]),
- slot_offset + 16 * sizeof(cfg->reg_val[0]));
-out:
- ufshcd_release(hba);
- return err;
-}
-
-static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
- const struct blk_crypto_key *key,
- unsigned int slot)
-{
- struct ufs_hba *hba =
- container_of(profile, struct ufs_hba, crypto_profile);
- const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
- const struct ufs_crypto_alg_entry *alg =
- &ufs_crypto_algs[key->crypto_cfg.crypto_mode];
- u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512;
- int i;
- int cap_idx = -1;
- union ufs_crypto_cfg_entry cfg = {};
- int err;
-
- BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
- for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) {
- if (ccap_array[i].algorithm_id == alg->ufs_alg &&
- ccap_array[i].key_size == alg->ufs_key_size &&
- (ccap_array[i].sdus_mask & data_unit_mask)) {
- cap_idx = i;
- break;
- }
- }
-
- if (WARN_ON(cap_idx < 0))
- return -EOPNOTSUPP;
-
- cfg.data_unit_size = data_unit_mask;
- cfg.crypto_cap_idx = cap_idx;
- cfg.config_enable = UFS_CRYPTO_CONFIGURATION_ENABLE;
-
- if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) {
- /* In XTS mode, the blk_crypto_key's size is already doubled */
- memcpy(cfg.crypto_key, key->raw, key->size/2);
- memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
- key->raw + key->size/2, key->size/2);
- } else {
- memcpy(cfg.crypto_key, key->raw, key->size);
- }
-
- err = ufshcd_program_key(hba, &cfg, slot);
-
- memzero_explicit(&cfg, sizeof(cfg));
- return err;
-}
-
-static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
-{
- /*
- * Clear the crypto cfg on the device. Clearing CFGE
- * might not be sufficient, so just clear the entire cfg.
- */
- union ufs_crypto_cfg_entry cfg = {};
-
- return ufshcd_program_key(hba, &cfg, slot);
-}
-
-static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
- const struct blk_crypto_key *key,
- unsigned int slot)
-{
- struct ufs_hba *hba =
- container_of(profile, struct ufs_hba, crypto_profile);
-
- return ufshcd_clear_keyslot(hba, slot);
-}
-
-bool ufshcd_crypto_enable(struct ufs_hba *hba)
-{
- if (!(hba->caps & UFSHCD_CAP_CRYPTO))
- return false;
-
- /* Reset might clear all keys, so reprogram all the keys. */
- blk_crypto_reprogram_all_keys(&hba->crypto_profile);
- return true;
-}
-
-static const struct blk_crypto_ll_ops ufshcd_crypto_ops = {
- .keyslot_program = ufshcd_crypto_keyslot_program,
- .keyslot_evict = ufshcd_crypto_keyslot_evict,
-};
-
-static enum blk_crypto_mode_num
-ufshcd_find_blk_crypto_mode(union ufs_crypto_cap_entry cap)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ufs_crypto_algs); i++) {
- BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
- if (ufs_crypto_algs[i].ufs_alg == cap.algorithm_id &&
- ufs_crypto_algs[i].ufs_key_size == cap.key_size) {
- return i;
- }
- }
- return BLK_ENCRYPTION_MODE_INVALID;
-}
-
-/**
- * ufshcd_hba_init_crypto_capabilities - Read crypto capabilities, init crypto
- * fields in hba
- * @hba: Per adapter instance
- *
- * Return: 0 if crypto was initialized or is not supported, else a -errno value.
- */
-int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
-{
- int cap_idx;
- int err = 0;
- enum blk_crypto_mode_num blk_mode_num;
-
- /*
- * Don't use crypto if either the hardware doesn't advertise the
- * standard crypto capability bit *or* if the vendor specific driver
- * hasn't advertised that crypto is supported.
- */
- if (!(hba->capabilities & MASK_CRYPTO_SUPPORT) ||
- !(hba->caps & UFSHCD_CAP_CRYPTO))
- goto out;
-
- hba->crypto_capabilities.reg_val =
- cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP));
- hba->crypto_cfg_register =
- (u32)hba->crypto_capabilities.config_array_ptr * 0x100;
- hba->crypto_cap_array =
- devm_kcalloc(hba->dev, hba->crypto_capabilities.num_crypto_cap,
- sizeof(hba->crypto_cap_array[0]), GFP_KERNEL);
- if (!hba->crypto_cap_array) {
- err = -ENOMEM;
- goto out;
- }
-
- /* The actual number of configurations supported is (CFGC+1) */
- err = devm_blk_crypto_profile_init(
- hba->dev, &hba->crypto_profile,
- hba->crypto_capabilities.config_count + 1);
- if (err)
- goto out;
-
- hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
- /* UFS only supports 8 bytes for any DUN */
- hba->crypto_profile.max_dun_bytes_supported = 8;
- hba->crypto_profile.dev = hba->dev;
-
- /*
- * Cache all the UFS crypto capabilities and advertise the supported
- * crypto modes and data unit sizes to the block layer.
- */
- for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap;
- cap_idx++) {
- hba->crypto_cap_array[cap_idx].reg_val =
- cpu_to_le32(ufshcd_readl(hba,
- REG_UFS_CRYPTOCAP +
- cap_idx * sizeof(__le32)));
- blk_mode_num = ufshcd_find_blk_crypto_mode(
- hba->crypto_cap_array[cap_idx]);
- if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID)
- hba->crypto_profile.modes_supported[blk_mode_num] |=
- hba->crypto_cap_array[cap_idx].sdus_mask * 512;
- }
-
- return 0;
-
-out:
- /* Indicate that init failed by clearing UFSHCD_CAP_CRYPTO */
- hba->caps &= ~UFSHCD_CAP_CRYPTO;
- return err;
-}
-
-/**
- * ufshcd_init_crypto - Initialize crypto hardware
- * @hba: Per adapter instance
- */
-void ufshcd_init_crypto(struct ufs_hba *hba)
-{
- int slot;
-
- if (!(hba->caps & UFSHCD_CAP_CRYPTO))
- return;
-
- /* Clear all keyslots - the number of keyslots is (CFGC + 1) */
- for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++)
- ufshcd_clear_keyslot(hba, slot);
-}
-
-void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
-{
- if (hba->caps & UFSHCD_CAP_CRYPTO)
- blk_crypto_register(&hba->crypto_profile, q);
-}
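A worked sketch of the AES-256-XTS key layout handled by ufshcd_crypto_keyslot_program() above: blk-crypto hands over a doubled raw key (key1 || key2), and the crypto config entry expects key1 at offset 0 with key2 starting at half the maximum key size. Sizes here are assumptions (key->size == 64 and UFS_CRYPTO_KEY_MAX_SIZE == 64):

#include <linux/string.h>

/* cfg_key points at the crypto_key field of the config entry. */
static void xts_key_layout(u8 *cfg_key, const u8 *raw, unsigned int size)
{
	memcpy(cfg_key, raw, size / 2);			/* key1 -> lower half */
	memcpy(cfg_key + UFS_CRYPTO_KEY_MAX_SIZE / 2,	/* key2 -> upper half */
	       raw + size / 2, size / 2);
}

The caller then wipes its on-stack copy of the config entry with memzero_explicit(), as the function above does, so the key material does not linger on the stack.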
diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h
deleted file mode 100644
index e18c01276873..000000000000
--- a/drivers/scsi/ufs/ufshcd-crypto.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2019 Google LLC
- */
-
-#ifndef _UFSHCD_CRYPTO_H
-#define _UFSHCD_CRYPTO_H
-
-#ifdef CONFIG_SCSI_UFS_CRYPTO
-#include "ufshcd.h"
-#include "ufshci.h"
-
-static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
- struct ufshcd_lrb *lrbp)
-{
- if (!rq || !rq->crypt_keyslot) {
- lrbp->crypto_key_slot = -1;
- return;
- }
-
- lrbp->crypto_key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot);
- lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0];
-}
-
-static inline void
-ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
- u32 *dword_1, u32 *dword_3)
-{
- if (lrbp->crypto_key_slot >= 0) {
- *dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
- *dword_0 |= lrbp->crypto_key_slot;
- *dword_1 = lower_32_bits(lrbp->data_unit_num);
- *dword_3 = upper_32_bits(lrbp->data_unit_num);
- }
-}
-
-bool ufshcd_crypto_enable(struct ufs_hba *hba);
-
-int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
-
-void ufshcd_init_crypto(struct ufs_hba *hba);
-
-void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q);
-
-#else /* CONFIG_SCSI_UFS_CRYPTO */
-
-static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
- struct ufshcd_lrb *lrbp) { }
-
-static inline void
-ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
- u32 *dword_1, u32 *dword_3) { }
-
-static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
-{
- return false;
-}
-
-static inline int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
-{
- return 0;
-}
-
-static inline void ufshcd_init_crypto(struct ufs_hba *hba) { }
-
-static inline void ufshcd_crypto_register(struct ufs_hba *hba,
- struct request_queue *q) { }
-
-#endif /* CONFIG_SCSI_UFS_CRYPTO */
-
-#endif /* _UFSHCD_CRYPTO_H */
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c
deleted file mode 100644
index 5bb9d3a88795..000000000000
--- a/drivers/scsi/ufs/ufshcd-dwc.c
+++ /dev/null
@@ -1,148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * UFS Host driver for Synopsys Designware Core
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#include "ufshcd.h"
-#include "unipro.h"
-
-#include "ufshcd-dwc.h"
-#include "ufshci-dwc.h"
-
-int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
- const struct ufshcd_dme_attr_val *v, int n)
-{
- int ret = 0;
- int attr_node = 0;
-
- for (attr_node = 0; attr_node < n; attr_node++) {
- ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel,
- ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
-
-/**
- * ufshcd_dwc_program_clk_div() - program the clock divider value
- * @hba: private structure pointer
- * @divider_val: clock divider value to be programmed
- *
- * This value is needed to provide a 1 microsecond tick to the UniPro layer.
- */
-static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
-{
- ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV);
-}
-
-/**
- * ufshcd_dwc_link_is_up() - check if the link is up
- * @hba: private structure pointer
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
-{
- int dme_result = 0;
-
- ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result);
-
- if (dme_result == UFSHCD_LINK_IS_UP) {
- ufshcd_set_link_active(hba);
- return 0;
- }
-
- return 1;
-}
-
-/**
- * ufshcd_dwc_connection_setup() - configure the connection to the CPort
- * @hba: pointer to the driver's private data
- *
- * This function configures both the local side (host) and the peer side
- * (device) UniPro attributes to establish the connection to the
- * application/CPort.
- * This function is not required if the hardware is properly configured to
- * have this connection set up on reset, but invoking it does no harm and
- * is safe with any UFS device.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
-{
- static const struct ufshcd_dme_attr_val setup_attrs[] = {
- { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
- { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
- { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
- { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL },
- { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL },
- { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL },
- { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL },
- { UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL },
- { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL },
- { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER },
- { UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER },
- { UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER },
- { UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER },
- { UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER },
- { UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER },
- { UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER },
- { UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER },
- { UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER }
- };
-
- return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs));
-}
-
-/**
- * ufshcd_dwc_link_startup_notify() - UFS Host DWC specific link startup
- * sequence
- * @hba: private structure pointer
- * @status: callback notify status
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int err = 0;
-
- if (status == PRE_CHANGE) {
- ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125);
-
- err = ufshcd_vops_phy_initialization(hba);
- if (err) {
- dev_err(hba->dev, "Phy setup failed (%d)\n", err);
- goto out;
- }
- } else { /* POST_CHANGE */
- err = ufshcd_dwc_link_is_up(hba);
- if (err) {
- dev_err(hba->dev, "Link is not up\n");
- goto out;
- }
-
- err = ufshcd_dwc_connection_setup(hba);
- if (err)
- dev_err(hba->dev, "Connection setup failed (%d)\n",
- err);
- }
-
-out:
- return err;
-}
-EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify);
-
-MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
-MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/scsi/ufs/ufshcd-dwc.h b/drivers/scsi/ufs/ufshcd-dwc.h
deleted file mode 100644
index 4268ca2eb64c..000000000000
--- a/drivers/scsi/ufs/ufshcd-dwc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UFS Host driver for Synopsys Designware Core
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#ifndef _UFSHCD_DWC_H
-#define _UFSHCD_DWC_H
-
-struct ufshcd_dme_attr_val {
- u32 attr_sel;
- u32 mib_val;
- u8 peer;
-};
-
-int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status);
-int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
- const struct ufshcd_dme_attr_val *v, int n);
-#endif /* End of Header */
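A usage sketch for the helper declared above: a glue driver can batch-program UniPro attributes from a small table instead of issuing individual DME set calls, the same way ufshcd_dwc_connection_setup() does. The function name and attribute choices here are illustrative only:

static int my_glue_setup(struct ufs_hba *hba)
{
	static const struct ufshcd_dme_attr_val attrs[] = {
		{ UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL },
		{ UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL },
	};

	/* Stops at and returns the error from the first failing DME set. */
	return ufshcd_dwc_dme_set_attrs(hba, attrs, ARRAY_SIZE(attrs));
}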
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
deleted file mode 100644
index f725248ba57f..000000000000
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ /dev/null
@@ -1,606 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Universal Flash Storage Host controller PCI glue driver
- *
- * This code is based on drivers/scsi/ufs/ufshcd-pci.c
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#include "ufshcd.h"
-#include <linux/pci.h>
-#include <linux/pm_runtime.h>
-#include <linux/pm_qos.h>
-#include <linux/debugfs.h>
-#include <linux/uuid.h>
-#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
-
-struct ufs_host {
- void (*late_init)(struct ufs_hba *hba);
-};
-
-enum {
- INTEL_DSM_FNS = 0,
- INTEL_DSM_RESET = 1,
-};
-
-struct intel_host {
- struct ufs_host ufs_host;
- u32 dsm_fns;
- u32 active_ltr;
- u32 idle_ltr;
- struct dentry *debugfs_root;
- struct gpio_desc *reset_gpio;
-};
-
-static const guid_t intel_dsm_guid =
- GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
- 0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
-
-static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
- unsigned int fn, u32 *result)
-{
- union acpi_object *obj;
- int err = 0;
- size_t len;
-
- obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
- if (!obj)
- return -EOPNOTSUPP;
-
- if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
- err = -EINVAL;
- goto out;
- }
-
- len = min_t(size_t, obj->buffer.length, 4);
-
- *result = 0;
- memcpy(result, obj->buffer.pointer, len);
-out:
- ACPI_FREE(obj);
-
- return err;
-}
-
-static int intel_dsm(struct intel_host *intel_host, struct device *dev,
- unsigned int fn, u32 *result)
-{
- if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
- return -EOPNOTSUPP;
-
- return __intel_dsm(intel_host, dev, fn, result);
-}
-
-static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
-{
- int err;
-
- err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
- dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
-}
-
-static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- /* Cannot enable ICE until after HC enable */
- if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
- u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
-
- hce |= CRYPTO_GENERAL_ENABLE;
- ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
- }
-
- return 0;
-}
-
-static int ufs_intel_disable_lcc(struct ufs_hba *hba)
-{
- u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
- u32 lcc_enable = 0;
-
- ufshcd_dme_get(hba, attr, &lcc_enable);
- if (lcc_enable)
- ufshcd_disable_host_tx_lcc(hba);
-
- return 0;
-}
-
-static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
-{
- int err = 0;
-
- switch (status) {
- case PRE_CHANGE:
- err = ufs_intel_disable_lcc(hba);
- break;
- case POST_CHANGE:
- break;
- default:
- break;
- }
-
- return err;
-}
-
-static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
-{
- struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
- int ret;
-
- pwr_info.lane_rx = lanes;
- pwr_info.lane_tx = lanes;
- ret = ufshcd_config_pwr_mode(hba, &pwr_info);
- if (ret)
- dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
- __func__, lanes, ret);
- return ret;
-}
-
-static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- int err = 0;
-
- switch (status) {
- case PRE_CHANGE:
- if (ufshcd_is_hs_mode(dev_max_params) &&
- (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
- ufs_intel_set_lanes(hba, 2);
- memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
- break;
- case POST_CHANGE:
- if (ufshcd_is_hs_mode(dev_req_params)) {
- u32 peer_granularity;
-
- usleep_range(1000, 1250);
- err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
- &peer_granularity);
- }
- break;
- default:
- break;
- }
-
- return err;
-}
-
-static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
-{
- u32 granularity, peer_granularity;
- u32 pa_tactivate, peer_pa_tactivate;
- int ret;
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
- if (ret)
- goto out;
-
- if (granularity == peer_granularity) {
- u32 new_peer_pa_tactivate = pa_tactivate + 2;
-
- ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
- }
-out:
- return ret;
-}
-
-#define INTEL_ACTIVELTR 0x804
-#define INTEL_IDLELTR 0x808
-
-#define INTEL_LTR_REQ BIT(15)
-#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
-#define INTEL_LTR_SCALE_1US (2 << 10)
-#define INTEL_LTR_SCALE_32US (3 << 10)
-#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
-
-static void intel_cache_ltr(struct ufs_hba *hba)
-{
- struct intel_host *host = ufshcd_get_variant(hba);
-
- host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
- host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
-}
-
-static void intel_ltr_set(struct device *dev, s32 val)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct intel_host *host = ufshcd_get_variant(hba);
- u32 ltr;
-
- pm_runtime_get_sync(dev);
-
- /*
- * Program the latency tolerance (LTR) according to what has been
- * requested by the PM QoS layer, or disable it if we were passed a
- * negative value or PM_QOS_LATENCY_ANY.
- */
- ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
-
- if (val == PM_QOS_LATENCY_ANY || val < 0) {
- ltr &= ~INTEL_LTR_REQ;
- } else {
- ltr |= INTEL_LTR_REQ;
- ltr &= ~INTEL_LTR_SCALE_MASK;
- ltr &= ~INTEL_LTR_VALUE_MASK;
-
- if (val > INTEL_LTR_VALUE_MASK) {
- val >>= 5;
- if (val > INTEL_LTR_VALUE_MASK)
- val = INTEL_LTR_VALUE_MASK;
- ltr |= INTEL_LTR_SCALE_32US | val;
- } else {
- ltr |= INTEL_LTR_SCALE_1US | val;
- }
- }
-
- if (ltr == host->active_ltr)
- goto out;
-
- writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
- writel(ltr, hba->mmio_base + INTEL_IDLELTR);
-
- /* Cache the values into intel_host structure */
- intel_cache_ltr(hba);
-out:
- pm_runtime_put(dev);
-}
-
-static void intel_ltr_expose(struct device *dev)
-{
- dev->power.set_latency_tolerance = intel_ltr_set;
- dev_pm_qos_expose_latency_tolerance(dev);
-}
-
-static void intel_ltr_hide(struct device *dev)
-{
- dev_pm_qos_hide_latency_tolerance(dev);
- dev->power.set_latency_tolerance = NULL;
-}
-
-static void intel_add_debugfs(struct ufs_hba *hba)
-{
- struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
- struct intel_host *host = ufshcd_get_variant(hba);
-
- intel_cache_ltr(hba);
-
- host->debugfs_root = dir;
- debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
- debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
-}
-
-static void intel_remove_debugfs(struct ufs_hba *hba)
-{
- struct intel_host *host = ufshcd_get_variant(hba);
-
- debugfs_remove_recursive(host->debugfs_root);
-}
-
-static int ufs_intel_device_reset(struct ufs_hba *hba)
-{
- struct intel_host *host = ufshcd_get_variant(hba);
-
- if (host->dsm_fns & INTEL_DSM_RESET) {
- u32 result = 0;
- int err;
-
- err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
- if (!err && !result)
- err = -EIO;
- if (err)
- dev_err(hba->dev, "%s: DSM error %d result %u\n",
- __func__, err, result);
- return err;
- }
-
- if (!host->reset_gpio)
- return -EOPNOTSUPP;
-
- gpiod_set_value_cansleep(host->reset_gpio, 1);
- usleep_range(10, 15);
-
- gpiod_set_value_cansleep(host->reset_gpio, 0);
- usleep_range(10, 15);
-
- return 0;
-}
-
-static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
-{
- /* GPIO in _DSD has active low setting */
- return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
-}
-
-static int ufs_intel_common_init(struct ufs_hba *hba)
-{
- struct intel_host *host;
-
- hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
-
- host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
- if (!host)
- return -ENOMEM;
- ufshcd_set_variant(hba, host);
- intel_dsm_init(host, hba->dev);
- if (host->dsm_fns & INTEL_DSM_RESET) {
- if (hba->vops->device_reset)
- hba->caps |= UFSHCD_CAP_DEEPSLEEP;
- } else {
- if (hba->vops->device_reset)
- host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
- if (IS_ERR(host->reset_gpio)) {
- dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
- __func__, PTR_ERR(host->reset_gpio));
- host->reset_gpio = NULL;
- }
- if (host->reset_gpio) {
- gpiod_set_value_cansleep(host->reset_gpio, 0);
- hba->caps |= UFSHCD_CAP_DEEPSLEEP;
- }
- }
- intel_ltr_expose(hba->dev);
- intel_add_debugfs(hba);
- return 0;
-}
-
-static void ufs_intel_common_exit(struct ufs_hba *hba)
-{
- intel_remove_debugfs(hba);
- intel_ltr_hide(hba->dev);
-}
-
-static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
-{
- if (ufshcd_is_link_hibern8(hba)) {
- int ret = ufshcd_uic_hibern8_exit(hba);
-
- if (!ret) {
- ufshcd_set_link_active(hba);
- } else {
- dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
- __func__, ret);
- /*
- * Force reset and restore. Any other actions can lead
- * to an unrecoverable state.
- */
- ufshcd_set_link_off(hba);
- }
- }
-
- return 0;
-}
-
-static int ufs_intel_ehl_init(struct ufs_hba *hba)
-{
- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- return ufs_intel_common_init(hba);
-}
-
-static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
-{
- /* LKF always needs a full reset, so set PM accordingly */
- if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
- hba->spm_lvl = UFS_PM_LVL_6;
- hba->rpm_lvl = UFS_PM_LVL_6;
- } else {
- hba->spm_lvl = UFS_PM_LVL_5;
- hba->rpm_lvl = UFS_PM_LVL_5;
- }
-}
-
-static int ufs_intel_lkf_init(struct ufs_hba *hba)
-{
- struct ufs_host *ufs_host;
- int err;
-
- hba->nop_out_timeout = 200;
- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- hba->caps |= UFSHCD_CAP_CRYPTO;
- err = ufs_intel_common_init(hba);
- ufs_host = ufshcd_get_variant(hba);
- ufs_host->late_init = ufs_intel_lkf_late_init;
- return err;
-}
-
-static int ufs_intel_adl_init(struct ufs_hba *hba)
-{
- hba->nop_out_timeout = 200;
- hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- return ufs_intel_common_init(hba);
-}
-
-static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
- .name = "intel-pci",
- .init = ufs_intel_common_init,
- .exit = ufs_intel_common_exit,
- .link_startup_notify = ufs_intel_link_startup_notify,
- .resume = ufs_intel_resume,
-};
-
-static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
- .name = "intel-pci",
- .init = ufs_intel_ehl_init,
- .exit = ufs_intel_common_exit,
- .link_startup_notify = ufs_intel_link_startup_notify,
- .resume = ufs_intel_resume,
-};
-
-static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
- .name = "intel-pci",
- .init = ufs_intel_lkf_init,
- .exit = ufs_intel_common_exit,
- .hce_enable_notify = ufs_intel_hce_enable_notify,
- .link_startup_notify = ufs_intel_link_startup_notify,
- .pwr_change_notify = ufs_intel_lkf_pwr_change_notify,
- .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,
- .resume = ufs_intel_resume,
- .device_reset = ufs_intel_device_reset,
-};
-
-static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
- .name = "intel-pci",
- .init = ufs_intel_adl_init,
- .exit = ufs_intel_common_exit,
- .link_startup_notify = ufs_intel_link_startup_notify,
- .resume = ufs_intel_resume,
- .device_reset = ufs_intel_device_reset,
-};
-
-#ifdef CONFIG_PM_SLEEP
-static int ufshcd_pci_restore(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- /* Force a full reset and restore */
- ufshcd_set_link_off(hba);
-
- return ufshcd_system_resume(dev);
-}
-#endif
-
-/**
- * ufshcd_pci_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void ufshcd_pci_shutdown(struct pci_dev *pdev)
-{
- ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-/**
- * ufshcd_pci_remove - de-allocate the PCI/SCSI host and the host memory
- * space data structures
- * @pdev: pointer to PCI handle
- */
-static void ufshcd_pci_remove(struct pci_dev *pdev)
-{
- struct ufs_hba *hba = pci_get_drvdata(pdev);
-
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
- ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
-}
-
-/**
- * ufshcd_pci_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int
-ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct ufs_host *ufs_host;
- struct ufs_hba *hba;
- void __iomem *mmio_base;
- int err;
-
- err = pcim_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pcim_enable_device failed\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
- if (err < 0) {
- dev_err(&pdev->dev, "request and iomap failed\n");
- return err;
- }
-
- mmio_base = pcim_iomap_table(pdev)[0];
-
- err = ufshcd_alloc_host(&pdev->dev, &hba);
- if (err) {
- dev_err(&pdev->dev, "Allocation failed\n");
- return err;
- }
-
- pci_set_drvdata(pdev, hba);
-
- hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
-
- err = ufshcd_init(hba, mmio_base, pdev->irq);
- if (err) {
- dev_err(&pdev->dev, "Initialization failed\n");
- ufshcd_dealloc_host(hba);
- return err;
- }
-
- ufs_host = ufshcd_get_variant(hba);
- if (ufs_host && ufs_host->late_init)
- ufs_host->late_init(hba);
-
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
-#ifdef CONFIG_PM_SLEEP
- .suspend = ufshcd_system_suspend,
- .resume = ufshcd_system_resume,
- .freeze = ufshcd_system_suspend,
- .thaw = ufshcd_system_resume,
- .poweroff = ufshcd_system_suspend,
- .restore = ufshcd_pci_restore,
- .prepare = ufshcd_suspend_prepare,
- .complete = ufshcd_resume_complete,
-#endif
-};
-
-static const struct pci_device_id ufshcd_pci_tbl[] = {
- { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
- { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
- { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
- { } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
-
-static struct pci_driver ufshcd_pci_driver = {
- .name = UFSHCD,
- .id_table = ufshcd_pci_tbl,
- .probe = ufshcd_pci_probe,
- .remove = ufshcd_pci_remove,
- .shutdown = ufshcd_pci_shutdown,
- .driver = {
- .pm = &ufshcd_pci_pm_ops
- },
-};
-
-module_pci_driver(ufshcd_pci_driver);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
-MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("UFS host controller PCI glue driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
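For clarity, the LTR register encoding performed by intel_ltr_set() above can be isolated as a pure function: values that fit in the 10-bit value field use the 1 us scale, while larger values are divided by 32 (val >>= 5) and use the 32 us scale, saturating at the field maximum. For example, a requested tolerance of 3000 encodes as 3000 >> 5 = 93 units of 32 us, i.e. roughly 2976 us. A sketch using the register constants defined above:

static u32 intel_ltr_encode(s32 val)
{
	u32 ltr = INTEL_LTR_REQ;

	if (val > INTEL_LTR_VALUE_MASK) {
		val >>= 5;			/* convert to 32 us units */
		if (val > INTEL_LTR_VALUE_MASK)
			val = INTEL_LTR_VALUE_MASK;	/* saturate */
		ltr |= INTEL_LTR_SCALE_32US | val;
	} else {
		ltr |= INTEL_LTR_SCALE_1US | val;
	}
	return ltr;
}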
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
deleted file mode 100644
index eaeae83b999f..000000000000
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ /dev/null
@@ -1,382 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Universal Flash Storage Host controller Platform bus based glue driver
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/of.h>
-
-#include "ufshcd.h"
-#include "ufshcd-pltfrm.h"
-#include "unipro.h"
-
-#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
-
-static int ufshcd_parse_clock_info(struct ufs_hba *hba)
-{
- int ret = 0;
- int cnt;
- int i;
- struct device *dev = hba->dev;
- struct device_node *np = dev->of_node;
- char *name;
- u32 *clkfreq = NULL;
- struct ufs_clk_info *clki;
- int len = 0;
- size_t sz = 0;
-
- if (!np)
- goto out;
-
- cnt = of_property_count_strings(np, "clock-names");
- if (!cnt || (cnt == -EINVAL)) {
- dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
- __func__);
- } else if (cnt < 0) {
- dev_err(dev, "%s: count clock strings failed, err %d\n",
- __func__, cnt);
- ret = cnt;
- }
-
- if (cnt <= 0)
- goto out;
-
- if (!of_get_property(np, "freq-table-hz", &len)) {
- dev_info(dev, "freq-table-hz property not specified\n");
- goto out;
- }
-
- if (len <= 0)
- goto out;
-
- sz = len / sizeof(*clkfreq);
- if (sz != 2 * cnt) {
- dev_err(dev, "%s len mismatch\n", "freq-table-hz");
- ret = -EINVAL;
- goto out;
- }
-
- clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
- GFP_KERNEL);
- if (!clkfreq) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = of_property_read_u32_array(np, "freq-table-hz",
- clkfreq, sz);
- if (ret && (ret != -EINVAL)) {
- dev_err(dev, "%s: error reading array %d\n",
- "freq-table-hz", ret);
- return ret;
- }
-
- for (i = 0; i < sz; i += 2) {
- ret = of_property_read_string_index(np,
- "clock-names", i/2, (const char **)&name);
- if (ret)
- goto out;
-
- clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
- if (!clki) {
- ret = -ENOMEM;
- goto out;
- }
-
- clki->min_freq = clkfreq[i];
- clki->max_freq = clkfreq[i+1];
- clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
- if (!strcmp(name, "ref_clk"))
- clki->keep_link_active = true;
- dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
- clki->min_freq, clki->max_freq, clki->name);
- list_add_tail(&clki->list, &hba->clk_list_head);
- }
-out:
- return ret;
-}
-
-#define MAX_PROP_SIZE 32
-static int ufshcd_populate_vreg(struct device *dev, const char *name,
- struct ufs_vreg **out_vreg)
-{
- char prop_name[MAX_PROP_SIZE];
- struct ufs_vreg *vreg = NULL;
- struct device_node *np = dev->of_node;
-
- if (!np) {
- dev_err(dev, "%s: non DT initialization\n", __func__);
- goto out;
- }
-
- snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
- if (!of_parse_phandle(np, prop_name, 0)) {
- dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
- __func__, prop_name);
- goto out;
- }
-
- vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
- if (!vreg)
- return -ENOMEM;
-
- vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
-
- snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
- if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
- dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
- vreg->max_uA = 0;
- }
-out:
- *out_vreg = vreg;
- return 0;
-}
-
-/**
- * ufshcd_parse_regulator_info - get regulator info from device tree
- * @hba: per adapter instance
- *
- * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
- * If any of the supplies is not defined, it is assumed to be always-on and
- * zero is returned. If a property is defined but fails to parse, the
- * corresponding error is returned.
- */
-static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
-{
- int err;
- struct device *dev = hba->dev;
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
- if (err)
- goto out;
-
- err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
- if (err)
- goto out;
-
- err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
- if (err)
- goto out;
-
- err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
-out:
- return err;
-}
-
-void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
-{
- ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
-
-static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
-{
- struct device *dev = hba->dev;
- int ret;
-
- ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
- &hba->lanes_per_direction);
- if (ret) {
- dev_dbg(hba->dev,
- "%s: failed to read lanes-per-direction, ret=%d\n",
- __func__, ret);
- hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
- }
-}
-
-/**
- * ufshcd_get_pwr_dev_param - get the finally agreed attributes for a
- * power mode change
- * @pltfrm_param: pointer to platform parameters
- * @dev_max: pointer to device attributes
- * @agreed_pwr: returned agreed attributes
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
- struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr)
-{
- int min_pltfrm_gear;
- int min_dev_gear;
- bool is_dev_sup_hs = false;
- bool is_pltfrm_max_hs = false;
-
- if (dev_max->pwr_rx == FAST_MODE)
- is_dev_sup_hs = true;
-
- if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
- is_pltfrm_max_hs = true;
- min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
- pltfrm_param->hs_tx_gear);
- } else {
- min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
- pltfrm_param->pwm_tx_gear);
- }
-
- /*
- * The device doesn't support HS but
- * pltfrm_param->desired_working_mode is HS, so the device and
- * pltfrm_param don't agree.
- */
- if (!is_dev_sup_hs && is_pltfrm_max_hs) {
- pr_info("%s: device doesn't support HS\n",
- __func__);
- return -ENOTSUPP;
- } else if (is_dev_sup_hs && is_pltfrm_max_hs) {
- /*
- * Since the device supports HS, it supports FAST_MODE, and since
- * pltfrm_param->desired_working_mode is also HS, the final decision
- * (FAST/FASTAUTO) is made according to pltfrm_param, as it is the
- * restricting factor.
- */
- agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
- agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
- } else {
- /*
- * Here pltfrm_param->desired_working_mode is PWM. It doesn't matter
- * whether the device supports HS or PWM; in both cases
- * pltfrm_param->desired_working_mode determines the mode.
- */
- agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
- agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
- }
-
- /*
- * We would like TX to work with the minimum number of lanes between
- * the device capability and the vendor preferences. The same decision
- * is made for RX.
- */
- agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
- pltfrm_param->tx_lanes);
- agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
- pltfrm_param->rx_lanes);
-
- /* device maximum gear is the minimum between device rx and tx gears */
- min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
-
- /*
- * If the device capabilities and the vendor pre-defined preferences
- * are both HS or both PWM, set the chosen working gear to the minimum
- * gear.
- * If one is PWM and one is HS, the PWM side gets to decide the gear,
- * as it is the one that also decided previously what power mode the
- * device will be configured to.
- */
- if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
- (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
- agreed_pwr->gear_rx =
- min_t(u32, min_dev_gear, min_pltfrm_gear);
- } else if (!is_dev_sup_hs) {
- agreed_pwr->gear_rx = min_dev_gear;
- } else {
- agreed_pwr->gear_rx = min_pltfrm_gear;
- }
- agreed_pwr->gear_tx = agreed_pwr->gear_rx;
-
- agreed_pwr->hs_rate = pltfrm_param->hs_rate;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
-
-void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
-{
- dev_param->tx_lanes = 2;
- dev_param->rx_lanes = 2;
- dev_param->hs_rx_gear = UFS_HS_G3;
- dev_param->hs_tx_gear = UFS_HS_G3;
- dev_param->pwm_rx_gear = UFS_PWM_G4;
- dev_param->pwm_tx_gear = UFS_PWM_G4;
- dev_param->rx_pwr_pwm = SLOW_MODE;
- dev_param->tx_pwr_pwm = SLOW_MODE;
- dev_param->rx_pwr_hs = FAST_MODE;
- dev_param->tx_pwr_hs = FAST_MODE;
- dev_param->hs_rate = PA_HS_MODE_B;
- dev_param->desired_working_mode = UFS_HS_MODE;
-}
-EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
-
-/**
- * ufshcd_pltfrm_init - probe routine of the driver
- * @pdev: pointer to Platform device handle
- * @vops: pointer to variant ops
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_pltfrm_init(struct platform_device *pdev,
- const struct ufs_hba_variant_ops *vops)
-{
- struct ufs_hba *hba;
- void __iomem *mmio_base;
- int irq, err;
- struct device *dev = &pdev->dev;
-
- mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mmio_base)) {
- err = PTR_ERR(mmio_base);
- goto out;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto out;
- }
-
- err = ufshcd_alloc_host(dev, &hba);
- if (err) {
- dev_err(&pdev->dev, "Allocation failed\n");
- goto out;
- }
-
- hba->vops = vops;
-
- err = ufshcd_parse_clock_info(hba);
- if (err) {
- dev_err(&pdev->dev, "%s: clock parse failed %d\n",
- __func__, err);
- goto dealloc_host;
- }
- err = ufshcd_parse_regulator_info(hba);
- if (err) {
- dev_err(&pdev->dev, "%s: regulator init failed %d\n",
- __func__, err);
- goto dealloc_host;
- }
-
- ufshcd_init_lanes_per_dir(hba);
-
- err = ufshcd_init(hba, mmio_base, irq);
- if (err) {
- dev_err(dev, "Initialization failed\n");
- goto dealloc_host;
- }
-
- platform_set_drvdata(pdev, hba);
-
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-
-dealloc_host:
- ufshcd_dealloc_host(hba);
-out:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
-MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
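A usage sketch of the two helpers exported above, as a platform glue driver would call them from its pwr_change_notify() PRE_CHANGE path (the callback name here is hypothetical). With the defaults set by ufshcd_init_pwr_dev_param() (HS gear G3, two lanes, rate B) and a device reporting FAST_MODE at gear G4 on two lanes, the agreed result is FAST_MODE at min(G4, G3) = G3, two lanes, rate B:

static int my_pwr_change_pre(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *dev_max,
			     struct ufs_pa_layer_attr *dev_req)
{
	struct ufs_dev_params host_cap;

	ufshcd_init_pwr_dev_param(&host_cap);	/* platform defaults */
	/* Negotiate: dev_req receives the agreed gear/lanes/power mode. */
	return ufshcd_get_pwr_dev_param(&host_cap, dev_max, dev_req);
}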
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
deleted file mode 100644
index c33e28ac6ef6..000000000000
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef UFSHCD_PLTFRM_H_
-#define UFSHCD_PLTFRM_H_
-
-#include "ufshcd.h"
-
-#define UFS_PWM_MODE 1
-#define UFS_HS_MODE 2
-
-struct ufs_dev_params {
- u32 pwm_rx_gear; /* pwm rx gear to work in */
- u32 pwm_tx_gear; /* pwm tx gear to work in */
- u32 hs_rx_gear; /* hs rx gear to work in */
- u32 hs_tx_gear; /* hs tx gear to work in */
- u32 rx_lanes; /* number of rx lanes */
- u32 tx_lanes; /* number of tx lanes */
- u32 rx_pwr_pwm; /* rx pwm working pwr */
- u32 tx_pwr_pwm; /* tx pwm working pwr */
- u32 rx_pwr_hs; /* rx hs working pwr */
- u32 tx_pwr_hs; /* tx hs working pwr */
- u32 hs_rate; /* rate A/B to work in HS */
- u32 desired_working_mode;
-};
-
-int ufshcd_get_pwr_dev_param(struct ufs_dev_params *dev_param,
- struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr);
-void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param);
-int ufshcd_pltfrm_init(struct platform_device *pdev,
- const struct ufs_hba_variant_ops *vops);
-void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
-
-#endif /* UFSHCD_PLTFRM_H_ */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
deleted file mode 100644
index 13c09dbd99b9..000000000000
--- a/drivers/scsi/ufs/ufshcd.c
+++ /dev/null
@@ -1,9823 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Universal Flash Storage Host controller driver Core
- * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#include <linux/async.h>
-#include <linux/devfreq.h>
-#include <linux/nls.h>
-#include <linux/of.h>
-#include <linux/bitfield.h>
-#include <linux/blk-pm.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi_driver.h>
-#include "ufshcd.h"
-#include "ufs_quirks.h"
-#include "unipro.h"
-#include "ufs-sysfs.h"
-#include "ufs-debugfs.h"
-#include "ufs-fault-injection.h"
-#include "ufs_bsg.h"
-#include "ufshcd-crypto.h"
-#include "ufshpb.h"
-#include <asm/unaligned.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/ufs.h>
-
-#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
- UTP_TASK_REQ_COMPL |\
- UFSHCD_ERROR_MASK)
-/* UIC command timeout, unit: ms */
-#define UIC_CMD_TIMEOUT 500
-
-/* NOP OUT retries waiting for NOP IN response */
-#define NOP_OUT_RETRIES 10
-/* Timeout after 50 msecs if NOP OUT hangs without response */
-#define NOP_OUT_TIMEOUT 50 /* msecs */
-
-/* Query request retries */
-#define QUERY_REQ_RETRIES 3
-/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
-
-/* Task management command timeout */
-#define TM_CMD_TIMEOUT 100 /* msecs */
-
-/* maximum number of retries for a general UIC command */
-#define UFS_UIC_COMMAND_RETRIES 3
-
-/* maximum number of link-startup retries */
-#define DME_LINKSTARTUP_RETRIES 3
-
-/* Maximum retries for Hibern8 enter */
-#define UIC_HIBERN8_ENTER_RETRIES 3
-
-/* maximum number of reset retries before giving up */
-#define MAX_HOST_RESET_RETRIES 5
-
-/* Maximum number of error handler retries before giving up */
-#define MAX_ERR_HANDLER_RETRIES 5
-
-/* Expose the flag value from utp_upiu_query.value */
-#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
-
-/* Interrupt aggregation default timeout, unit: 40us */
-#define INT_AGGR_DEF_TO 0x02
-
-/* default delay of autosuspend: 2000 ms */
-#define RPM_AUTOSUSPEND_DELAY_MS 2000
-
-/* Default delay of RPM device flush delayed work */
-#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
-
-/* Default value of wait time before gating device ref clock */
-#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
-
-/* Polling time to wait for fDeviceInit */
-#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
-
-#define wlun_dev_to_hba(dv) shost_priv(to_scsi_device(dv)->host)
-
-#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
- ({ \
- int _ret; \
- if (_on) \
- _ret = ufshcd_enable_vreg(_dev, _vreg); \
- else \
- _ret = ufshcd_disable_vreg(_dev, _vreg); \
- _ret; \
- })
-
-#define ufshcd_hex_dump(prefix_str, buf, len) do { \
- size_t __len = (len); \
- print_hex_dump(KERN_ERR, prefix_str, \
- __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
- 16, 4, buf, __len, false); \
-} while (0)
-
-int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
- const char *prefix)
-{
- u32 *regs;
- size_t pos;
-
- if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
- return -EINVAL;
-
- regs = kzalloc(len, GFP_ATOMIC);
- if (!regs)
- return -ENOMEM;
-
- for (pos = 0; pos < len; pos += 4)
- regs[pos / 4] = ufshcd_readl(hba, offset + pos);
-
- ufshcd_hex_dump(prefix, regs, len);
- kfree(regs);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
-
-enum {
- UFSHCD_MAX_CHANNEL = 0,
- UFSHCD_MAX_ID = 1,
- UFSHCD_CMD_PER_LUN = 32,
- UFSHCD_CAN_QUEUE = 32,
-};
-
-static const char *const ufshcd_state_name[] = {
- [UFSHCD_STATE_RESET] = "reset",
- [UFSHCD_STATE_OPERATIONAL] = "operational",
- [UFSHCD_STATE_ERROR] = "error",
- [UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
- [UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
-};
-
-/* UFSHCD error handling flags */
-enum {
- UFSHCD_EH_IN_PROGRESS = (1 << 0),
-};
-
-/* UFSHCD UIC layer error flags */
-enum {
- UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
- UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
- UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
- UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
- UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
-};
-
-#define ufshcd_set_eh_in_progress(h) \
- ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
-#define ufshcd_eh_in_progress(h) \
- ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
-#define ufshcd_clear_eh_in_progress(h) \
- ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
-
-struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
- [UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- [UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- [UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
- [UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- [UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
- [UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
- /*
- * For DeepSleep, the link is first put in hibern8 and then off.
- * Leaving the link in hibern8 is not supported.
- */
- [UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
-};
-
-static inline enum ufs_dev_pwr_mode
-ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
-{
- return ufs_pm_lvl_states[lvl].dev_state;
-}
-
-static inline enum uic_link_state
-ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
-{
- return ufs_pm_lvl_states[lvl].link_state;
-}
-
-static inline enum ufs_pm_level
-ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
- enum uic_link_state link_state)
-{
- enum ufs_pm_level lvl;
-
- for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
- if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
- (ufs_pm_lvl_states[lvl].link_state == link_state))
- return lvl;
- }
-
- /* if no match is found, return level 0 */
- return UFS_PM_LVL_0;
-}
-
-static struct ufs_dev_fix ufs_fixups[] = {
- /* UFS cards deviations table */
- UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
- UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ),
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
- UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
- UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
- UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
- UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
- UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
- UFS_DEVICE_QUIRK_PA_TACTIVATE),
- END_FIX
-};
-
-static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
-static void ufshcd_async_scan(void *data, async_cookie_t cookie);
-static int ufshcd_reset_and_restore(struct ufs_hba *hba);
-static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
-static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
-static void ufshcd_hba_exit(struct ufs_hba *hba);
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
-static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
-static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
-static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
-static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_mode);
-static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
-static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
-static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
- struct ufs_vreg *vreg);
-static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
-static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
-static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
-
-static inline void ufshcd_enable_irq(struct ufs_hba *hba)
-{
- if (!hba->is_irq_enabled) {
- enable_irq(hba->irq);
- hba->is_irq_enabled = true;
- }
-}
-
-static inline void ufshcd_disable_irq(struct ufs_hba *hba)
-{
- if (hba->is_irq_enabled) {
- disable_irq(hba->irq);
- hba->is_irq_enabled = false;
- }
-}
-
-static inline void ufshcd_wb_config(struct ufs_hba *hba)
-{
- if (!ufshcd_is_wb_allowed(hba))
- return;
-
- ufshcd_wb_toggle(hba, true);
-
- ufshcd_wb_toggle_flush_during_h8(hba, true);
- if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
- ufshcd_wb_toggle_flush(hba, true);
-}
-
-static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
-{
- if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
- scsi_unblock_requests(hba->host);
-}
-
-static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
-{
- if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
- scsi_block_requests(hba->host);
-}
-
-static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- enum ufs_trace_str_t str_t)
-{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
- struct utp_upiu_header *header;
-
- if (!trace_ufshcd_upiu_enabled())
- return;
-
- if (str_t == UFS_CMD_SEND)
- header = &rq->header;
- else
- header = &hba->lrb[tag].ucd_rsp_ptr->header;
-
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
- UFS_TSF_CDB);
-}
-
-static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
- enum ufs_trace_str_t str_t,
- struct utp_upiu_req *rq_rsp)
-{
- if (!trace_ufshcd_upiu_enabled())
- return;
-
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
- &rq_rsp->qr, UFS_TSF_OSF);
-}
-
-static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- enum ufs_trace_str_t str_t)
-{
- struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
-
- if (!trace_ufshcd_upiu_enabled())
- return;
-
- if (str_t == UFS_TM_SEND)
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
- &descp->upiu_req.req_header,
- &descp->upiu_req.input_param1,
- UFS_TSF_TM_INPUT);
- else
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
- &descp->upiu_rsp.rsp_header,
- &descp->upiu_rsp.output_param1,
- UFS_TSF_TM_OUTPUT);
-}
-
-static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
- struct uic_command *ucmd,
- enum ufs_trace_str_t str_t)
-{
- u32 cmd;
-
- if (!trace_ufshcd_uic_command_enabled())
- return;
-
- if (str_t == UFS_CMD_SEND)
- cmd = ucmd->command;
- else
- cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
-
- trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
- ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
- ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
- ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
-}
-
-static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
- enum ufs_trace_str_t str_t)
-{
- u64 lba;
- u8 opcode = 0, group_id = 0;
- u32 intr, doorbell;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- struct scsi_cmnd *cmd = lrbp->cmd;
- struct request *rq = scsi_cmd_to_rq(cmd);
- int transfer_len = -1;
-
- if (!cmd)
- return;
-
- /* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
- if (!trace_ufshcd_command_enabled())
- return;
-
- opcode = cmd->cmnd[0];
- lba = scsi_get_lba(cmd);
-
- if (opcode == READ_10 || opcode == WRITE_10) {
- /*
- * Currently we only fully trace read(10) and write(10) commands
- */
- transfer_len =
- be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
- if (opcode == WRITE_10)
- group_id = lrbp->cmd->cmnd[6];
- } else if (opcode == UNMAP) {
- /*
- * The number of bytes to be unmapped, beginning at the LBA.
- */
- transfer_len = blk_rq_bytes(rq);
- }
-
- intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
- doorbell, transfer_len, intr, lba, opcode, group_id);
-}
-
-static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
-{
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
-
- if (list_empty(head))
- return;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
- clki->max_freq)
- dev_err(hba->dev, "clk: %s, rate: %u\n",
- clki->name, clki->curr_freq);
- }
-}
-
-static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
- char *err_name)
-{
- int i;
- bool found = false;
- struct ufs_event_hist *e;
-
- if (id >= UFS_EVT_CNT)
- return;
-
- e = &hba->ufs_stats.event[id];
-
- for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
- int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
-
- if (e->tstamp[p] == 0)
- continue;
- dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
- e->val[p], ktime_to_us(e->tstamp[p]));
- found = true;
- }
-
- if (!found)
- dev_err(hba->dev, "No record of %s\n", err_name);
- else
- dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
-}
-
-static void ufshcd_print_evt_hist(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
-
- ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
- ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
- ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
- ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
- ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
- ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
- "auto_hibern8_err");
- ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
- ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
- "link_startup_fail");
- ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
- ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
- "suspend_fail");
- ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
- ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
- ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
-
- ufshcd_vops_dbg_register_dump(hba);
-}
-
-static
-void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
-{
- struct ufshcd_lrb *lrbp;
- int prdt_length;
- int tag;
-
- for_each_set_bit(tag, &bitmap, hba->nutrs) {
- lrbp = &hba->lrb[tag];
-
- dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
- tag, ktime_to_us(lrbp->issue_time_stamp));
- dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
- tag, ktime_to_us(lrbp->compl_time_stamp));
- dev_err(hba->dev,
- "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
- tag, (u64)lrbp->utrd_dma_addr);
-
- ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
- sizeof(struct utp_transfer_req_desc));
- dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
- (u64)lrbp->ucd_req_dma_addr);
- ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
- sizeof(struct utp_upiu_req));
- dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
- (u64)lrbp->ucd_rsp_dma_addr);
- ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
- sizeof(struct utp_upiu_rsp));
-
- prdt_length = le16_to_cpu(
- lrbp->utr_descriptor_ptr->prd_table_length);
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- prdt_length /= sizeof(struct ufshcd_sg_entry);
-
- dev_err(hba->dev,
- "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
- tag, prdt_length,
- (u64)lrbp->ucd_prdt_dma_addr);
-
- if (pr_prdt)
- ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
- sizeof(struct ufshcd_sg_entry) * prdt_length);
- }
-}
-
-static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
-{
- int tag;
-
- for_each_set_bit(tag, &bitmap, hba->nutmrs) {
- struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
-
- dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
- ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
- }
-}
-
-static void ufshcd_print_host_state(struct ufs_hba *hba)
-{
- struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
-
- dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
- dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
- hba->saved_err, hba->saved_uic_err);
- dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
- hba->pm_op_in_progress, hba->is_sys_suspended);
- dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
- hba->auto_bkops_enabled, hba->host->host_self_blocked);
- dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
- dev_err(hba->dev,
- "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
- hba->ufs_stats.hibern8_exit_cnt);
- dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
- ktime_to_us(hba->ufs_stats.last_intr_ts),
- hba->ufs_stats.last_intr_status);
- dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
- hba->eh_flags, hba->req_abort_count);
- dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
- hba->ufs_version, hba->capabilities, hba->caps);
- dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
- hba->dev_quirks);
- if (sdev_ufs)
- dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
- sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
-
- ufshcd_print_clk_freqs(hba);
-}
-
-/**
- * ufshcd_print_pwr_info - print the power parameters saved in
- * hba->pwr_info
- * @hba: per-adapter instance
- */
-static void ufshcd_print_pwr_info(struct ufs_hba *hba)
-{
- static const char * const names[] = {
- "INVALID MODE",
- "FAST MODE",
- "SLOW_MODE",
- "INVALID MODE",
- "FASTAUTO_MODE",
- "SLOWAUTO_MODE",
- "INVALID MODE",
- };
-
- dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
- __func__,
- hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
- hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
- names[hba->pwr_info.pwr_rx],
- names[hba->pwr_info.pwr_tx],
- hba->pwr_info.hs_rate);
-}
-
-static void ufshcd_device_reset(struct ufs_hba *hba)
-{
- int err;
-
- err = ufshcd_vops_device_reset(hba);
-
- if (!err) {
- ufshcd_set_ufs_dev_active(hba);
- if (ufshcd_is_wb_allowed(hba)) {
- hba->dev_info.wb_enabled = false;
- hba->dev_info.wb_buf_flush_enabled = false;
- }
- }
- if (err != -EOPNOTSUPP)
- ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
-}
-
-void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
-{
- if (!us)
- return;
-
- if (us < 10)
- udelay(us);
- else
- usleep_range(us, us + tolerance);
-}
-EXPORT_SYMBOL_GPL(ufshcd_delay_us);
-
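-/*
- * Illustrative usage (editorial addition, hypothetical values):
- *
- * ufshcd_delay_us(5, 10); // < 10 us: busy-waits via udelay(5)
- * ufshcd_delay_us(100, 10); // >= 10 us: sleeps via usleep_range(100, 110)
- */
-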
-/**
- * ufshcd_wait_for_register - wait for register value to change
- * @hba: per-adapter interface
- * @reg: mmio register offset
- * @mask: mask to apply to the read register value
- * @val: value to wait for
- * @interval_us: polling interval in microseconds
- * @timeout_ms: timeout in milliseconds
- *
- * Return:
- * -ETIMEDOUT on error, zero on success.
- */
-int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us,
- unsigned long timeout_ms)
-{
- int err = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
-
- /* ignore bits that we don't intend to wait on */
- val = val & mask;
-
- while ((ufshcd_readl(hba, reg) & mask) != val) {
- usleep_range(interval_us, interval_us + 50);
- if (time_after(jiffies, timeout)) {
- if ((ufshcd_readl(hba, reg) & mask) != val)
- err = -ETIMEDOUT;
- break;
- }
- }
-
- return err;
-}
-
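-/*
- * Example caller (editorial sketch, hypothetical values): poll until the
- * doorbell bit for tag 3 clears, checking every 100 us for up to 1 s:
- *
- * err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
- * 1 << 3, 0, 100, 1000);
- */
-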
-/**
- * ufshcd_get_intr_mask - Get the interrupt bit mask
- * @hba: Pointer to adapter instance
- *
- * Returns interrupt bit mask per version
- */
-static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
-{
- if (hba->ufs_version == ufshci_version(1, 0))
- return INTERRUPT_MASK_ALL_VER_10;
- if (hba->ufs_version <= ufshci_version(2, 0))
- return INTERRUPT_MASK_ALL_VER_11;
-
- return INTERRUPT_MASK_ALL_VER_21;
-}
-
-/**
- * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
- * @hba: Pointer to adapter instance
- *
- * Returns UFSHCI version supported by the controller
- */
-static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
-{
- u32 ufshci_ver;
-
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
- ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
- else
- ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
-
- /*
- * UFSHCI v1.x uses a different version scheme. To allow
- * comparisons with the ufshci_version() helper, convert it
- * to the scheme used by UFSHCI 2.0 and later.
- */
- if (ufshci_ver & 0x00010000)
- return ufshci_version(1, ufshci_ver & 0x00000100);
-
- return ufshci_ver;
-}
-
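-/*
- * Worked example (editorial): a legacy VER register value of 0x00010000
- * (UFSHCI 1.0) has bit 16 set, so it is converted via ufshci_version(1, 0)
- * into the 2.0+ major.minor encoding before any comparisons.
- */
-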
-/**
- * ufshcd_is_device_present - Check if any device is connected to
- * the host controller
- * @hba: pointer to adapter instance
- *
- * Returns true if device present, false if no device detected
- */
-static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
-{
- return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
-}
-
-/**
- * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
- * @lrbp: pointer to local command reference block
- *
- * Returns the OCS field of the UTRD.
- */
-static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
-{
- return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
-}
-
-/**
- * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
- * @hba: per adapter instance
- * @pos: position of the bit to be cleared
- */
-static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
-{
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos),
- REG_UTP_TRANSFER_REQ_LIST_CLEAR);
-}
-
-/**
- * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
- * @hba: per adapter instance
- * @pos: position of the bit to be cleared
- */
-static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
-{
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
- ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
- else
- ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
-}
-
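-/*
- * Worked example (editorial): per UFSHCI, a slot is cleared by writing '0'
- * to its bit while keeping all other bits '1', so pos = 2 results in a
- * write of ~(1 << 2) = 0xFFFFFFFB. Hosts with the BROKEN_REQ_LIST_CLR
- * quirk instead expect a '1' at the bit to be cleared (1 << 2 = 0x4).
- */
-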
-/**
- * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
- * @reg: Register value of host controller status
- *
- * Returns 0 on success, a positive value on failure
- */
-static inline int ufshcd_get_lists_status(u32 reg)
-{
- return (reg & UFSHCD_STATUS_READY) != UFSHCD_STATUS_READY;
-}
-
-/**
- * ufshcd_get_uic_cmd_result - Get the UIC command result
- * @hba: Pointer to adapter instance
- *
- * This function gets the result of UIC command completion.
- * Returns 0 on success, a non-zero value on error.
- */
-static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
-{
- return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
- MASK_UIC_COMMAND_RESULT;
-}
-
-/**
- * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
- * @hba: Pointer to adapter instance
- *
- * This function reads UIC command argument 3.
- * Returns the attribute value carried in that register.
- */
-static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
-{
- return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
-}
-
-/**
- * ufshcd_get_req_rsp - returns the TR response transaction type
- * @ucd_rsp_ptr: pointer to response UPIU
- */
-static inline int
-ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
-}
-
-/**
- * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
- *
- * This function gets the response status and scsi_status from response UPIU
- * Returns the response result code.
- */
-static inline int
-ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
-}
-
-/**
- * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
- * from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
- *
- * Return the data segment length.
- */
-static inline unsigned int
-ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
- MASK_RSP_UPIU_DATA_SEG_LEN;
-}
-
-/**
- * ufshcd_is_exception_event - Check if the device raised an exception event
- * @ucd_rsp_ptr: pointer to response UPIU
- *
- * The function checks if the device raised an exception event indicated in
- * the Device Information field of response UPIU.
- *
- * Returns true if exception is raised, false otherwise.
- */
-static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
- MASK_RSP_EXCEPTION_EVENT;
-}
-
-/**
- * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
- * @hba: per adapter instance
- */
-static inline void
-ufshcd_reset_intr_aggr(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, INT_AGGR_ENABLE |
- INT_AGGR_COUNTER_AND_TIMER_RESET,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-}
-
-/**
- * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
- * @hba: per adapter instance
- * @cnt: Interrupt aggregation counter threshold
- * @tmout: Interrupt aggregation timeout value
- */
-static inline void
-ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
-{
- ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
- INT_AGGR_COUNTER_THLD_VAL(cnt) |
- INT_AGGR_TIMEOUT_VAL(tmout),
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-}
-
-/**
- * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
- * @hba: per adapter instance
- */
-static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
-}
-
-/**
- * ufshcd_enable_run_stop_reg - Enable run-stop registers.
- * When the run-stop registers are set to 1, the host
- * controller may start processing requests.
- * @hba: per adapter instance
- */
-static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
- REG_UTP_TASK_REQ_LIST_RUN_STOP);
- ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
- REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
-}
-
-/**
- * ufshcd_hba_start - Start controller initialization sequence
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_start(struct ufs_hba *hba)
-{
- u32 val = CONTROLLER_ENABLE;
-
- if (ufshcd_crypto_enable(hba))
- val |= CRYPTO_GENERAL_ENABLE;
-
- ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
-}
-
-/**
- * ufshcd_is_hba_active - Get controller state
- * @hba: per adapter instance
- *
- * Returns false if controller is active, true otherwise
- */
-static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
-{
- return !(ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE);
-}
-
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
-{
- /* HCI versions 1.0 and 1.1 support UniPro 1.41 */
- if (hba->ufs_version <= ufshci_version(1, 1))
- return UFS_UNIPRO_VER_1_41;
- else
- return UFS_UNIPRO_VER_1_6;
-}
-EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
-
-static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
-{
- /*
- * If both host and device support UniPro ver1.6 or later, PA layer
- * parameters tuning happens during link startup itself.
- *
- * We can manually tune PA layer parameters if either host or device
- * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
- * logic simple, we only do manual tuning if the local UniPro
- * version is older than ver 1.6.
- */
- return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
-}
-
-/**
- * ufshcd_set_clk_freq - set UFS controller clock frequencies
- * @hba: per adapter instance
- * @scale_up: If true, set the maximum frequency; otherwise set the minimum
- *
- * Returns 0 on success, a negative value on error
- */
-static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
-{
- int ret = 0;
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
-
- if (list_empty(head))
- goto out;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk)) {
- if (scale_up && clki->max_freq) {
- if (clki->curr_freq == clki->max_freq)
- continue;
-
- ret = clk_set_rate(clki->clk, clki->max_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->max_freq, ret);
- break;
- }
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
- "scaled up", clki->name,
- clki->curr_freq,
- clki->max_freq);
-
- clki->curr_freq = clki->max_freq;
-
- } else if (!scale_up && clki->min_freq) {
- if (clki->curr_freq == clki->min_freq)
- continue;
-
- ret = clk_set_rate(clki->clk, clki->min_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->min_freq, ret);
- break;
- }
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
- "scaled down", clki->name,
- clki->curr_freq,
- clki->min_freq);
- clki->curr_freq = clki->min_freq;
- }
- }
- dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
- clki->name, clk_get_rate(clki->clk));
- }
-
-out:
- return ret;
-}
-
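-/*
- * Illustrative clk_list entry (editorial, hypothetical frequencies): a
- * "core_clk" with min_freq = 37500000 and max_freq = 300000000 is set to
- * 300 MHz on scale-up and 37.5 MHz on scale-down; entries whose relevant
- * frequency is 0 (or whose clk handle is invalid) are skipped.
- */
-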
-/**
- * ufshcd_scale_clks - scale up or scale down UFS controller clocks
- * @hba: per adapter instance
- * @scale_up: True if scaling up and false if scaling down
- *
- * Returns 0 on success, a negative value on error
- */
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
-{
- int ret = 0;
- ktime_t start = ktime_get();
-
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
- if (ret)
- goto out;
-
- ret = ufshcd_set_clk_freq(hba, scale_up);
- if (ret)
- goto out;
-
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
- if (ret)
- ufshcd_set_clk_freq(hba, !scale_up);
-
-out:
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
- (scale_up ? "up" : "down"),
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
- return ret;
-}
-
-/**
- * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
- * @hba: per adapter instance
- * @scale_up: True if scaling up and false if scaling down
- *
- * Returns true if scaling is required, false otherwise.
- */
-static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
- bool scale_up)
-{
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
-
- if (list_empty(head))
- return false;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk)) {
- if (scale_up && clki->max_freq) {
- if (clki->curr_freq == clki->max_freq)
- continue;
- return true;
- } else if (!scale_up && clki->min_freq) {
- if (clki->curr_freq == clki->min_freq)
- continue;
- return true;
- }
- }
- }
-
- return false;
-}
-
-static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
- u64 wait_timeout_us)
-{
- unsigned long flags;
- int ret = 0;
- u32 tm_doorbell;
- u32 tr_doorbell;
- bool timeout = false, do_last_check = false;
- ktime_t start;
-
- ufshcd_hold(hba, false);
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * Wait for all the outstanding tasks/transfer requests.
- * Verify by checking the doorbell registers are clear.
- */
- start = ktime_get();
- do {
- if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
- ret = -EBUSY;
- goto out;
- }
-
- tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (!tm_doorbell && !tr_doorbell) {
- timeout = false;
- break;
- } else if (do_last_check) {
- break;
- }
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- schedule();
- if (ktime_to_us(ktime_sub(ktime_get(), start)) >
- wait_timeout_us) {
- timeout = true;
- /*
- * We might have been scheduled out for a long time,
- * so recheck whether the doorbells have cleared in
- * the meantime.
- */
- do_last_check = true;
- }
- spin_lock_irqsave(hba->host->host_lock, flags);
- } while (tm_doorbell || tr_doorbell);
-
- if (timeout) {
- dev_err(hba->dev,
- "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
- __func__, tm_doorbell, tr_doorbell);
- ret = -EBUSY;
- }
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release(hba);
- return ret;
-}
-
-/**
- * ufshcd_scale_gear - scale up/down UFS gear
- * @hba: per adapter instance
- * @scale_up: True for scaling up gear and false for scaling down
- *
- * Returns 0 on success, -EBUSY if scaling can't happen at this time,
- * or another non-zero value on error
- */
-static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
-{
- int ret = 0;
- struct ufs_pa_layer_attr new_pwr_info;
-
- if (scale_up) {
- memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
- sizeof(struct ufs_pa_layer_attr));
- } else {
- memcpy(&new_pwr_info, &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
-
- if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
- hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
- /* save the current power mode */
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
-
- /* scale down gear */
- new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
- new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
- }
- }
-
- /* check if the power mode needs to be changed */
- ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
- if (ret)
- dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
- __func__, ret,
- hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
- new_pwr_info.gear_tx, new_pwr_info.gear_rx);
-
- return ret;
-}
-
-static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
-{
- #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
- int ret = 0;
- /*
- * make sure that there are no outstanding requests when
- * clock scaling is in progress
- */
- ufshcd_scsi_block_requests(hba);
- down_write(&hba->clk_scaling_lock);
-
- if (!hba->clk_scaling.is_allowed ||
- ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
- ret = -EBUSY;
- up_write(&hba->clk_scaling_lock);
- ufshcd_scsi_unblock_requests(hba);
- goto out;
- }
-
- /* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
-
-out:
- return ret;
-}
-
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
-{
- if (writelock)
- up_write(&hba->clk_scaling_lock);
- else
- up_read(&hba->clk_scaling_lock);
- ufshcd_scsi_unblock_requests(hba);
- ufshcd_release(hba);
-}
-
-/**
- * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
- * @hba: per adapter instance
- * @scale_up: True for scaling up and false for scaling down
- *
- * Returns 0 on success, -EBUSY if scaling can't happen at this time,
- * or another non-zero value on error
- */
-static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
-{
- int ret = 0;
- bool is_writelock = true;
-
- ret = ufshcd_clock_scaling_prepare(hba);
- if (ret)
- return ret;
-
- /* scale down the gear before scaling down clocks */
- if (!scale_up) {
- ret = ufshcd_scale_gear(hba, false);
- if (ret)
- goto out_unprepare;
- }
-
- ret = ufshcd_scale_clks(hba, scale_up);
- if (ret) {
- if (!scale_up)
- ufshcd_scale_gear(hba, true);
- goto out_unprepare;
- }
-
- /* scale up the gear after scaling up clocks */
- if (scale_up) {
- ret = ufshcd_scale_gear(hba, true);
- if (ret) {
- ufshcd_scale_clks(hba, false);
- goto out_unprepare;
- }
- }
-
- /* Enable Write Booster if we have scaled up, else disable it */
- downgrade_write(&hba->clk_scaling_lock);
- is_writelock = false;
- ufshcd_wb_toggle(hba, scale_up);
-
-out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, is_writelock);
- return ret;
-}
-
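-/*
- * Editorial note on the ordering above: when scaling down, the gear is
- * dropped before the clocks; when scaling up, the clocks are raised before
- * the gear. Either way the link never runs a gear that the current clock
- * rate cannot sustain, and each step is rolled back if the next one fails.
- */
-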
-static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
-{
- struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_scaling.suspend_work);
- unsigned long irq_flags;
-
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
- }
- hba->clk_scaling.is_suspended = true;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
- __ufshcd_suspend_clkscaling(hba);
-}
-
-static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
-{
- struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_scaling.resume_work);
- unsigned long irq_flags;
-
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (!hba->clk_scaling.is_suspended) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return;
- }
- hba->clk_scaling.is_suspended = false;
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
- devfreq_resume_device(hba->devfreq);
-}
-
-static int ufshcd_devfreq_target(struct device *dev,
- unsigned long *freq, u32 flags)
-{
- int ret = 0;
- struct ufs_hba *hba = dev_get_drvdata(dev);
- ktime_t start;
- bool scale_up, sched_clk_scaling_suspend_work = false;
- struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
- unsigned long irq_flags;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return -EINVAL;
-
- clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
- /* Override with the closest supported frequency */
- *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
-
- if (!hba->clk_scaling.active_reqs)
- sched_clk_scaling_suspend_work = true;
-
- if (list_empty(clk_list)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- goto out;
- }
-
- /* Decide based on the rounded-off frequency and update */
- scale_up = *freq == clki->max_freq;
- if (!scale_up)
- *freq = clki->min_freq;
- /* Update the frequency */
- if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- ret = 0;
- goto out; /* no state change required */
- }
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
- start = ktime_get();
- ret = ufshcd_devfreq_scale(hba, scale_up);
-
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
- (scale_up ? "up" : "down"),
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
-
-out:
- if (sched_clk_scaling_suspend_work)
- queue_work(hba->clk_scaling.workq,
- &hba->clk_scaling.suspend_work);
-
- return ret;
-}
-
-static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
-{
- int *busy = priv;
-
- WARN_ON_ONCE(reserved);
- (*busy)++;
- return false;
-}
-
-/* Whether or not any tag is in use by a request that is in progress. */
-static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
-{
- struct request_queue *q = hba->cmd_queue;
- int busy = 0;
-
- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
- return busy;
-}
-
-static int ufshcd_devfreq_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
- struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
- ktime_t curr_t;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return -EINVAL;
-
- memset(stat, 0, sizeof(*stat));
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- curr_t = ktime_get();
- if (!scaling->window_start_t)
- goto start_window;
-
- clki = list_first_entry(clk_list, struct ufs_clk_info, list);
- /*
- * If the current frequency is 0, the ondemand governor assumes no
- * initial frequency has been set and always requests the maximum
- * frequency.
- */
- stat->current_frequency = clki->curr_freq;
- if (scaling->is_busy_started)
- scaling->tot_busy_t += ktime_us_delta(curr_t,
- scaling->busy_start_t);
-
- stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
- stat->busy_time = scaling->tot_busy_t;
-start_window:
- scaling->window_start_t = curr_t;
- scaling->tot_busy_t = 0;
-
- if (hba->outstanding_reqs) {
- scaling->busy_start_t = curr_t;
- scaling->is_busy_started = true;
- } else {
- scaling->busy_start_t = 0;
- scaling->is_busy_started = false;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return 0;
-}
-
-static int ufshcd_devfreq_init(struct ufs_hba *hba)
-{
- struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
- struct devfreq *devfreq;
- int ret;
-
- /* Skip devfreq if we don't have any clocks in the list */
- if (list_empty(clk_list))
- return 0;
-
- clki = list_first_entry(clk_list, struct ufs_clk_info, list);
- dev_pm_opp_add(hba->dev, clki->min_freq, 0);
- dev_pm_opp_add(hba->dev, clki->max_freq, 0);
-
- ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
- &hba->vps->ondemand_data);
- devfreq = devfreq_add_device(hba->dev,
- &hba->vps->devfreq_profile,
- DEVFREQ_GOV_SIMPLE_ONDEMAND,
- &hba->vps->ondemand_data);
- if (IS_ERR(devfreq)) {
- ret = PTR_ERR(devfreq);
- dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
-
- dev_pm_opp_remove(hba->dev, clki->min_freq);
- dev_pm_opp_remove(hba->dev, clki->max_freq);
- return ret;
- }
-
- hba->devfreq = devfreq;
-
- return 0;
-}
-
-static void ufshcd_devfreq_remove(struct ufs_hba *hba)
-{
- struct list_head *clk_list = &hba->clk_list_head;
- struct ufs_clk_info *clki;
-
- if (!hba->devfreq)
- return;
-
- devfreq_remove_device(hba->devfreq);
- hba->devfreq = NULL;
-
- clki = list_first_entry(clk_list, struct ufs_clk_info, list);
- dev_pm_opp_remove(hba->dev, clki->min_freq);
- dev_pm_opp_remove(hba->dev, clki->max_freq);
-}
-
-static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
-{
- unsigned long flags;
-
- devfreq_suspend_device(hba->devfreq);
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_scaling.window_start_t = 0;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
-static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
-{
- unsigned long flags;
- bool suspend = false;
-
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.is_suspended) {
- suspend = true;
- hba->clk_scaling.is_suspended = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (suspend)
- __ufshcd_suspend_clkscaling(hba);
-}
-
-static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
-{
- unsigned long flags;
- bool resume = false;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_scaling.is_suspended) {
- resume = true;
- hba->clk_scaling.is_suspended = false;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (resume)
- devfreq_resume_device(hba->devfreq);
-}
-
-static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
-}
-
-static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- u32 value;
- int err = 0;
-
- if (kstrtou32(buf, 0, &value))
- return -EINVAL;
-
- down(&hba->host_sem);
- if (!ufshcd_is_user_access_allowed(hba)) {
- err = -EBUSY;
- goto out;
- }
-
- value = !!value;
- if (value == hba->clk_scaling.is_enabled)
- goto out;
-
- ufshcd_rpm_get_sync(hba);
- ufshcd_hold(hba, false);
-
- hba->clk_scaling.is_enabled = value;
-
- if (value) {
- ufshcd_resume_clkscaling(hba);
- } else {
- ufshcd_suspend_clkscaling(hba);
- err = ufshcd_devfreq_scale(hba, true);
- if (err)
- dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
- __func__, err);
- }
-
- ufshcd_release(hba);
- ufshcd_rpm_put_sync(hba);
-out:
- up(&hba->host_sem);
- return err ? err : count;
-}
-
-static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
-{
- hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
- hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
- sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
- hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
- hba->clk_scaling.enable_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
-}
-
-static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
-{
- if (hba->clk_scaling.enable_attr.attr.name)
- device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
-}
-
-static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
-{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- if (!hba->clk_scaling.min_gear)
- hba->clk_scaling.min_gear = UFS_HS_G1;
-
- INIT_WORK(&hba->clk_scaling.suspend_work,
- ufshcd_clk_scaling_suspend_work);
- INIT_WORK(&hba->clk_scaling.resume_work,
- ufshcd_clk_scaling_resume_work);
-
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
- hba->clk_scaling.is_initialized = true;
-}
-
-static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
-{
- if (!hba->clk_scaling.is_initialized)
- return;
-
- ufshcd_remove_clk_scaling_sysfs(hba);
- destroy_workqueue(hba->clk_scaling.workq);
- ufshcd_devfreq_remove(hba);
- hba->clk_scaling.is_initialized = false;
-}
-
-static void ufshcd_ungate_work(struct work_struct *work)
-{
- int ret;
- unsigned long flags;
- struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_gating.ungate_work);
-
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == CLKS_ON) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- goto unblock_reqs;
- }
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_hba_vreg_set_hpm(hba);
- ufshcd_setup_clocks(hba, true);
-
- ufshcd_enable_irq(hba);
-
- /* Exit from hibern8 */
- if (ufshcd_can_hibern8_during_gating(hba)) {
- /* Prevent gating in this path */
- hba->clk_gating.is_suspended = true;
- if (ufshcd_is_link_hibern8(hba)) {
- ret = ufshcd_uic_hibern8_exit(hba);
- if (ret)
- dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
- __func__, ret);
- else
- ufshcd_set_link_active(hba);
- }
- hba->clk_gating.is_suspended = false;
- }
-unblock_reqs:
- ufshcd_scsi_unblock_requests(hba);
-}
-
-/**
- * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
- * Also, exit from hibern8 mode and set the link as active.
- * @hba: per adapter instance
- * @async: This indicates whether caller should ungate clocks asynchronously.
- */
-int ufshcd_hold(struct ufs_hba *hba, bool async)
-{
- int rc = 0;
- bool flush_result;
- unsigned long flags;
-
- if (!ufshcd_is_clkgating_allowed(hba))
- goto out;
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.active_reqs++;
-
-start:
- switch (hba->clk_gating.state) {
- case CLKS_ON:
- /*
- * Wait for the ungate work to complete if in progress.
- * Though the clocks may be in the ON state, the link could
- * still be in hibern8 state if hibern8 is allowed
- * during clock gating.
- * Make sure we exit hibern8 state in addition to
- * the clocks being ON.
- */
- if (ufshcd_can_hibern8_during_gating(hba) &&
- ufshcd_is_link_hibern8(hba)) {
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_result = flush_work(&hba->clk_gating.ungate_work);
- if (hba->clk_gating.is_suspended && !flush_result)
- goto out;
- spin_lock_irqsave(hba->host->host_lock, flags);
- goto start;
- }
- break;
- case REQ_CLKS_OFF:
- if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- break;
- }
- /*
- * If we are here, it means gating work is either done or
- * currently running. Hence, fall through to cancel gating
- * work and to enable clocks.
- */
- fallthrough;
- case CLKS_OFF:
- hba->clk_gating.state = REQ_CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- if (queue_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.ungate_work))
- ufshcd_scsi_block_requests(hba);
- /*
- * fall through to check if we should wait for this
- * work to be done or not.
- */
- fallthrough;
- case REQ_CLKS_ON:
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->clk_gating.ungate_work);
- /* Make sure state is CLKS_ON before returning */
- spin_lock_irqsave(hba->host->host_lock, flags);
- goto start;
- default:
- dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
- __func__, hba->clk_gating.state);
- break;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return rc;
-}
-EXPORT_SYMBOL_GPL(ufshcd_hold);
-
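-/*
- * Typical caller pattern (editorial sketch):
- *
- * ufshcd_hold(hba, false); // sync: clocks guaranteed ON on return
- * ... issue the command ...
- * ufshcd_release(hba); // may re-arm gate_work after delay_ms
- *
- * With async == true, ufshcd_hold() returns -EAGAIN instead of sleeping
- * while an ungate is still in flight, so the caller can retry later.
- */
-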
-static void ufshcd_gate_work(struct work_struct *work)
-{
- struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_gating.gate_work.work);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * If this work is pending cancellation, the gating state
- * will already have been marked REQ_CLKS_ON. In that case
- * save time by skipping the gating work and exit after
- * changing the clock state to CLKS_ON.
- */
- if (hba->clk_gating.is_suspended ||
- (hba->clk_gating.state != REQ_CLKS_OFF)) {
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto rel_lock;
- }
-
- if (hba->clk_gating.active_reqs
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done)
- goto rel_lock;
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* put the link into hibern8 mode before turning off clocks */
- if (ufshcd_can_hibern8_during_gating(hba)) {
- ret = ufshcd_uic_hibern8_enter(hba);
- if (ret) {
- hba->clk_gating.state = CLKS_ON;
- dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
- __func__, ret);
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- goto out;
- }
- ufshcd_set_link_hibern8(hba);
- }
-
- ufshcd_disable_irq(hba);
-
- ufshcd_setup_clocks(hba, false);
-
- /* Put the host controller in low power mode if possible */
- ufshcd_hba_vreg_set_lpm(hba);
- /*
- * If this work is pending cancellation, the gating state will
- * already have been marked REQ_CLKS_ON. In that case leave the
- * state as REQ_CLKS_ON, which implies that the clocks are off
- * and a request to turn them on is pending. This keeps the
- * state machine intact and ultimately avoids running the cancel
- * work multiple times when new requests arrive before the
- * current cancel work is done.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == REQ_CLKS_OFF) {
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- }
-rel_lock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return;
-}
-
-/* host lock must be held before calling this variant */
-static void __ufshcd_release(struct ufs_hba *hba)
-{
- if (!ufshcd_is_clkgating_allowed(hba))
- return;
-
- hba->clk_gating.active_reqs--;
-
- if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
- hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks ||
- hba->active_uic_cmd || hba->uic_async_done ||
- hba->clk_gating.state == CLKS_OFF)
- return;
-
- hba->clk_gating.state = REQ_CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- queue_delayed_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
-}
-
-void ufshcd_release(struct ufs_hba *hba)
-{
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- __ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-EXPORT_SYMBOL_GPL(ufshcd_release);
-
-static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
-}
-
-static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags, value;
-
- if (kstrtoul(buf, 0, &value))
- return -EINVAL;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.delay_ms = value;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return count;
-}
-
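-/*
- * Example (editorial; the sysfs path below is abbreviated): set a 200 ms
- * gating delay from user space:
- *
- * echo 200 > /sys/.../clkgate_delay_ms
- */
-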
-static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
-}
-
-static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- unsigned long flags;
- u32 value;
-
- if (kstrtou32(buf, 0, &value))
- return -EINVAL;
-
- value = !!value;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (value == hba->clk_gating.is_enabled)
- goto out;
-
- if (value)
- __ufshcd_release(hba);
- else
- hba->clk_gating.active_reqs++;
-
- hba->clk_gating.is_enabled = value;
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return count;
-}
-
-static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
-{
- hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
- hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
- sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
- hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
- hba->clk_gating.delay_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
-
- hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
- hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
- sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
- hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
- hba->clk_gating.enable_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
-}
-
-static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
-{
- if (hba->clk_gating.delay_attr.attr.name)
- device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
- if (hba->clk_gating.enable_attr.attr.name)
- device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
-}
-
-static void ufshcd_init_clk_gating(struct ufs_hba *hba)
-{
- char wq_name[sizeof("ufs_clk_gating_00")];
-
- if (!ufshcd_is_clkgating_allowed(hba))
- return;
-
- hba->clk_gating.state = CLKS_ON;
-
- hba->clk_gating.delay_ms = 150;
- INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
- INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
-
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
- hba->host->host_no);
- hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
- WQ_MEM_RECLAIM | WQ_HIGHPRI);
-
- ufshcd_init_clk_gating_sysfs(hba);
-
- hba->clk_gating.is_enabled = true;
- hba->clk_gating.is_initialized = true;
-}
-
-static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
-{
- if (!hba->clk_gating.is_initialized)
- return;
- ufshcd_remove_clk_gating_sysfs(hba);
- cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- destroy_workqueue(hba->clk_gating.clk_gating_workq);
- hba->clk_gating.is_initialized = false;
-}
-
-/* Must be called with host lock acquired */
-static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
-{
- bool queue_resume_work = false;
- ktime_t curr_t = ktime_get();
- unsigned long flags;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->clk_scaling.active_reqs++)
- queue_resume_work = true;
-
- if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return;
- }
-
- if (queue_resume_work)
- queue_work(hba->clk_scaling.workq,
- &hba->clk_scaling.resume_work);
-
- if (!hba->clk_scaling.window_start_t) {
- hba->clk_scaling.window_start_t = curr_t;
- hba->clk_scaling.tot_busy_t = 0;
- hba->clk_scaling.is_busy_started = false;
- }
-
- if (!hba->clk_scaling.is_busy_started) {
- hba->clk_scaling.busy_start_t = curr_t;
- hba->clk_scaling.is_busy_started = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
-static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
-{
- struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- unsigned long flags;
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_scaling.active_reqs--;
- if (!hba->outstanding_reqs && scaling->is_busy_started) {
- scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
- scaling->busy_start_t));
- scaling->busy_start_t = 0;
- scaling->is_busy_started = false;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
-static inline int ufshcd_monitor_opcode2dir(u8 opcode)
-{
- if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
- return READ;
- else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
- return WRITE;
- else
- return -EINVAL;
-}
-
-static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
-{
- struct ufs_hba_monitor *m = &hba->monitor;
-
- return (m->enabled && lrbp && lrbp->cmd &&
- (!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
- ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
-}
-
-static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
- hba->monitor.busy_start_ts[dir] = ktime_get();
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
-static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
- struct request *req = scsi_cmd_to_rq(lrbp->cmd);
- struct ufs_hba_monitor *m = &hba->monitor;
- ktime_t now, inc, lat;
-
- now = lrbp->compl_time_stamp;
- inc = ktime_sub(now, m->busy_start_ts[dir]);
- m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
- m->nr_sec_rw[dir] += blk_rq_sectors(req);
-
- /* Update latencies */
- m->nr_req[dir]++;
- lat = ktime_sub(now, lrbp->issue_time_stamp);
- m->lat_sum[dir] += lat;
- if (m->lat_max[dir] < lat || !m->lat_max[dir])
- m->lat_max[dir] = lat;
- if (m->lat_min[dir] > lat || !m->lat_min[dir])
- m->lat_min[dir] = lat;
-
- m->nr_queued[dir]--;
- /* Push forward the busy start of monitor */
- m->busy_start_ts[dir] = now;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
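-/*
- * Editorial note: with the counters above, the average latency per
- * direction is m->lat_sum[dir] / m->nr_req[dir], alongside the recorded
- * minimum and maximum.
- */
-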
-/**
- * ufshcd_send_command - Send SCSI or device management commands
- * @hba: per adapter instance
- * @task_tag: Task tag of the command
- */
-static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
-{
- struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
- unsigned long flags;
-
- lrbp->issue_time_stamp = ktime_get();
- lrbp->compl_time_stamp = ktime_set(0, 0);
- ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
- ufshcd_clk_scaling_start_busy(hba);
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_start_monitor(hba, lrbp);
-
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- if (hba->vops && hba->vops->setup_xfer_req)
- hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- /* Make sure that doorbell is committed immediately */
- wmb();
-}
-
-/**
- * ufshcd_copy_sense_data - Copy sense data in case of check condition
- * @lrbp: pointer to local reference block
- */
-static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
-{
- int len;
-
- if (lrbp->sense_buffer &&
- ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
- int len_to_copy;
-
- len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
- len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
-
- memcpy(lrbp->sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
- len_to_copy);
- }
-}
-
-/**
- * ufshcd_copy_query_response() - Copy the Query Response and the data
- * descriptor
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- */
-static
-int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
-
- memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
-
- /* Get the descriptor */
- if (hba->dev_cmd.query.descriptor &&
- lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
- u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
- GENERAL_UPIU_REQUEST_SIZE;
- u16 resp_len;
- u16 buf_len;
-
- /* data segment length */
- resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
- MASK_QUERY_DATA_SEG_LEN;
- buf_len = be16_to_cpu(
- hba->dev_cmd.query.request.upiu_req.length);
- if (likely(buf_len >= resp_len)) {
- memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
- } else {
- dev_warn(hba->dev,
- "%s: rsp size %d is bigger than buffer size %d",
- __func__, resp_len, buf_len);
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
- * ufshcd_hba_capabilities - Read controller capabilities
- * @hba: per adapter instance
- *
- * Return: 0 on success, negative on error.
- */
-static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
-{
- int err;
-
- hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
-
- /* nutrs and nutmrs are 0 based values */
- hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
- hba->nutmrs =
- ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
-
- /* Read crypto capabilities */
- err = ufshcd_hba_init_crypto_capabilities(hba);
- if (err)
- dev_err(hba->dev, "crypto setup failed\n");
-
- return err;
-}
-
-/**
- * ufshcd_ready_for_uic_cmd - Check if controller is ready
- * to accept UIC commands
- * @hba: per adapter instance
- * Returns true if the controller is ready, else false
- */
-static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
-{
- return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY;
-}
-
-/**
- * ufshcd_get_upmcrs - Get the power mode change request status
- * @hba: Pointer to adapter instance
- *
- * Returns the value of the UPMCRS field of the HCS register.
- */
-static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
-{
- return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
-}
-
-/**
- * ufshcd_dispatch_uic_cmd - Dispatch a UIC command to the UniPro layer
- * @hba: per adapter instance
- * @uic_cmd: UIC command
- */
-static inline void
-ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
-{
- lockdep_assert_held(&hba->uic_cmd_mutex);
-
- WARN_ON(hba->active_uic_cmd);
-
- hba->active_uic_cmd = uic_cmd;
-
- /* Write Args */
- ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
- ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
- ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
-
- ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
-
- /* Write UIC Cmd */
- ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
- REG_UIC_COMMAND);
-}
-
-/**
- * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
- * @hba: per adapter instance
- * @uic_cmd: UIC command
- *
- * Returns 0 only on success.
- */
-static int
-ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
-{
- int ret;
- unsigned long flags;
-
- lockdep_assert_held(&hba->uic_cmd_mutex);
-
- if (wait_for_completion_timeout(&uic_cmd->done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
- ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
- } else {
- ret = -ETIMEDOUT;
- dev_err(hba->dev,
- "uic cmd 0x%x with arg3 0x%x completion timeout\n",
- uic_cmd->command, uic_cmd->argument3);
-
- if (!uic_cmd->cmd_active) {
- dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
- __func__);
- ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
- }
- }
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->active_uic_cmd = NULL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- return ret;
-}
-
-/**
- * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
- * @hba: per adapter instance
- * @uic_cmd: UIC command
- * @completion: initialize the completion only if this is set to true
- *
- * Returns 0 only on success.
- */
-static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
- bool completion)
-{
- lockdep_assert_held(&hba->uic_cmd_mutex);
- lockdep_assert_held(hba->host->host_lock);
-
- if (!ufshcd_ready_for_uic_cmd(hba)) {
- dev_err(hba->dev,
- "Controller not ready to accept UIC commands\n");
- return -EIO;
- }
-
- if (completion)
- init_completion(&uic_cmd->done);
-
- uic_cmd->cmd_active = 1;
- ufshcd_dispatch_uic_cmd(hba, uic_cmd);
-
- return 0;
-}
-
-/**
- * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
- * @hba: per adapter instance
- * @uic_cmd: UIC command
- *
- * Returns 0 only on success.
- */
-int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
-{
- int ret;
- unsigned long flags;
-
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
- return 0;
-
- ufshcd_hold(hba, false);
- mutex_lock(&hba->uic_cmd_mutex);
- ufshcd_add_delay_before_dme_cmd(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (!ret)
- ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
- mutex_unlock(&hba->uic_cmd_mutex);
-
- ufshcd_release(hba);
- return ret;
-}
-
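-/*
- * Illustrative caller (editorial sketch; attr_sel is a placeholder for a
- * DME attribute selector):
- *
- * struct uic_command uic_cmd = {
- * .command = UIC_CMD_DME_GET,
- * .argument1 = UIC_ARG_MIB(attr_sel),
- * };
- * ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- * if (!ret)
- * val = ufshcd_get_dme_attr_val(hba);
- */
-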
-/**
- * ufshcd_map_sg - Map scatter-gather list to prdt
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- *
- * Returns 0 in case of success, non-zero value in case of failure
- */
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufshcd_sg_entry *prd_table;
- struct scatterlist *sg;
- struct scsi_cmnd *cmd;
- int sg_segments;
- int i;
-
- cmd = lrbp->cmd;
- sg_segments = scsi_dma_map(cmd);
- if (sg_segments < 0)
- return sg_segments;
-
- if (sg_segments) {
-
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((sg_segments *
- sizeof(struct ufshcd_sg_entry)));
- else
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16(sg_segments);
-
- prd_table = lrbp->ucd_prdt_ptr;
-
- scsi_for_each_sg(cmd, sg, sg_segments, i) {
- const unsigned int len = sg_dma_len(sg);
-
- /*
- * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
- * based value that indicates the length, in bytes, of
- * the data block. A maximum length of 256KB may
- * exist for any entry. Bits 1:0 of this field shall be
- * 11b to indicate Dword granularity. A value of '3'
- * indicates 4 bytes, '7' indicates 8 bytes, etc."
- */
- WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
- prd_table[i].size = cpu_to_le32(len - 1);
- prd_table[i].addr = cpu_to_le64(sg->dma_address);
- prd_table[i].reserved = 0;
- }
- } else {
- lrbp->utr_descriptor_ptr->prd_table_length = 0;
- }
-
- return 0;
-}
-
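-/*
- * Worked example (editorial): a 4 KiB segment has sg_dma_len() == 0x1000,
- * so the zero-based Data Byte Count stored above is 0x1000 - 1 = 0x0FFF.
- */
-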
-/**
- * ufshcd_enable_intr - enable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- if (hba->ufs_version == ufshci_version(1, 0)) {
- u32 rw;
- rw = set & INTERRUPT_MASK_RW_VER_10;
- set = rw | ((set ^ intrs) & intrs);
- } else {
- set |= intrs;
- }
-
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_disable_intr - disable interrupts
- * @hba: per adapter instance
- * @intrs: interrupt bits
- */
-static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
-{
- u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-
- if (hba->ufs_version == ufshci_version(1, 0)) {
- u32 rw;
- rw = (set & INTERRUPT_MASK_RW_VER_10) &
- ~(intrs & INTERRUPT_MASK_RW_VER_10);
- set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
-
- } else {
- set &= ~intrs;
- }
-
- ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
-}
-
-/**
- * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor
- * header according to the request
- * @lrbp: pointer to local reference block
- * @upiu_flags: flags required in the header
- * @cmd_dir: request's data direction
- */
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
- u8 *upiu_flags, enum dma_data_direction cmd_dir)
-{
- struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
- u32 data_direction;
- u32 dword_0;
- u32 dword_1 = 0;
- u32 dword_3 = 0;
-
- if (cmd_dir == DMA_FROM_DEVICE) {
- data_direction = UTP_DEVICE_TO_HOST;
- *upiu_flags = UPIU_CMD_FLAGS_READ;
- } else if (cmd_dir == DMA_TO_DEVICE) {
- data_direction = UTP_HOST_TO_DEVICE;
- *upiu_flags = UPIU_CMD_FLAGS_WRITE;
- } else {
- data_direction = UTP_NO_DATA_TRANSFER;
- *upiu_flags = UPIU_CMD_FLAGS_NONE;
- }
-
- dword_0 = data_direction | (lrbp->command_type
- << UPIU_COMMAND_TYPE_OFFSET);
- if (lrbp->intr_cmd)
- dword_0 |= UTP_REQ_DESC_INT_CMD;
-
- /* Prepare crypto related dwords */
- ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
-
- /* Transfer request descriptor header fields */
- req_desc->header.dword_0 = cpu_to_le32(dword_0);
- req_desc->header.dword_1 = cpu_to_le32(dword_1);
- /*
- * Assign an invalid value for the command status; the controller
- * updates OCS with the actual command status upon completion.
- */
- req_desc->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
- req_desc->header.dword_3 = cpu_to_le32(dword_3);
-
- req_desc->prd_table_length = 0;
-}
-
-/**
- * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc
- * for SCSI commands
- * @lrbp: local reference block pointer
- * @upiu_flags: flags
- */
-static
-void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
-{
- struct scsi_cmnd *cmd = lrbp->cmd;
- struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
- unsigned short cdb_len;
-
- /* command descriptor fields */
- ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_COMMAND, upiu_flags,
- lrbp->lun, lrbp->task_tag);
- ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
- UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
-
- /* Total EHS length and Data segment length will be zero */
- ucd_req_ptr->header.dword_2 = 0;
-
- ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
-
- cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
- memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
- memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
-}
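For orientation, UPIU_HEADER_DWORD() packs its four byte-sized arguments into a single big-endian dword, roughly as below (sketched from memory of ufs.h, so treat the exact definition as an assumption); dword_0 above thus carries the transaction type, the UPIU flags, the LUN and the task tag, one per byte:

	/* Sketch: byte3 lands in the most significant byte. */
	#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0) \
		cpu_to_be32(((byte3) << 24) | ((byte2) << 16) | \
			    ((byte1) << 8) | (byte0))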
-
-/**
- * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc
- * for query requests
- * @hba: UFS hba
- * @lrbp: local reference block pointer
- * @upiu_flags: flags
- */
-static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, u8 upiu_flags)
-{
- struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
- struct ufs_query *query = &hba->dev_cmd.query;
- u16 len = be16_to_cpu(query->request.upiu_req.length);
-
- /* Query request header */
- ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
- lrbp->lun, lrbp->task_tag);
- ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
- 0, query->request.query_func, 0, 0);
-
- /* Data segment length is only needed for WRITE_DESC */
- if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- ucd_req_ptr->header.dword_2 =
- UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
- else
- ucd_req_ptr->header.dword_2 = 0;
-
- /* Copy the Query Request buffer as is */
- memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
- QUERY_OSF_SIZE);
-
- /* Copy the Descriptor */
- if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- memcpy(ucd_req_ptr + 1, query->descriptor, len);
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
-}
-
-static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
-{
- struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
-
- memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
-
- /* command descriptor fields */
- ucd_req_ptr->header.dword_0 =
- UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
- /* clear rest of the fields of basic header */
- ucd_req_ptr->header.dword_1 = 0;
- ucd_req_ptr->header.dword_2 = 0;
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
-}
-
-/**
- * ufshcd_compose_devman_upiu - compose a UFS Protocol Information
- * Unit (UPIU) for device management purposes
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- */
-static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp)
-{
- u8 upiu_flags;
- int ret = 0;
-
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
- if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
- ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
- else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
- ufshcd_prepare_utp_nop_upiu(lrbp);
- else
- ret = -EINVAL;
-
- return ret;
-}
-
-/**
- * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
- * for SCSI purposes
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- */
-static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- u8 upiu_flags;
- int ret = 0;
-
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_SCSI;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction);
- ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-/**
- * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
- * @upiu_wlun_id: UPIU W-LUN id
- *
- * Returns SCSI W-LUN id
- */
-static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
-{
- return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
-}
-
-static inline bool is_rpmb_wlun(struct scsi_device *sdev)
-{
- return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
-}
-
-static inline bool is_device_wlun(struct scsi_device *sdev)
-{
- return sdev->lun ==
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
-}
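A hypothetical worked example of the W-LUN mapping above, assuming the usual constant values (UFS_UPIU_WLUN_ID == 0x80, UFS_UPIU_RPMB_WLUN == 0xc4, UFS_UPIU_UFS_DEVICE_WLUN == 0xd0, SCSI_W_LUN_BASE == 0xc100; verify against ufs.h and scsi/scsi.h):

	/* RPMB W-LUN:   (0xc4 & ~0x80) | 0xc100 == 0xc144 */
	/* Device W-LUN: (0xd0 & ~0x80) | 0xc100 == 0xc150 */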
-
-static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
-{
- struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
- struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
- dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * sizeof(struct utp_transfer_cmd_desc);
- u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
- response_upiu);
- u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
-
- lrb->utr_descriptor_ptr = utrdlp + i;
- lrb->utrd_dma_addr = hba->utrdl_dma_addr +
- i * sizeof(struct utp_transfer_req_desc);
- lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
- lrb->ucd_req_dma_addr = cmd_desc_element_addr;
- lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
- lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
- lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
-}
-
-/**
- * ufshcd_queuecommand - main entry point for SCSI requests
- * @host: SCSI host pointer
- * @cmd: command from SCSI Midlayer
- *
- * Returns 0 for success, non-zero in case of failure
- */
-static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
-{
- struct ufs_hba *hba = shost_priv(host);
- int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp;
- int err = 0;
-
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
-
- if (!down_read_trylock(&hba->clk_scaling_lock))
- return SCSI_MLQUEUE_HOST_BUSY;
-
- switch (hba->ufshcd_state) {
- case UFSHCD_STATE_OPERATIONAL:
- break;
- case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
- /*
- * SCSI error handler can call ->queuecommand() while UFS error
- * handler is in progress. Error interrupts could change the
- * state from UFSHCD_STATE_RESET to
- * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
- * being issued in that case.
- */
- if (ufshcd_eh_in_progress(hba)) {
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- break;
- case UFSHCD_STATE_EH_SCHEDULED_FATAL:
- /*
- * pm_runtime_get_sync() is used at the error handling preparation
- * stage. If a SCSI cmd, e.g. the SSU cmd, is sent from the hba's
- * PM ops, it can never finish if the SCSI layer keeps retrying it,
- * which leaves the error handler stuck forever. Nor can the cmd be
- * allowed to pass through: with UFS in a bad state it may eventually
- * time out, which would block the error handler for too long. So
- * just fail SCSI cmds sent from PM ops; the error handler can
- * recover the PM error anyway.
- */
- if (hba->pm_op_in_progress) {
- hba->force_reset = true;
- set_host_byte(cmd, DID_BAD_TARGET);
- scsi_done(cmd);
- goto out;
- }
- fallthrough;
- case UFSHCD_STATE_RESET:
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- case UFSHCD_STATE_ERROR:
- set_host_byte(cmd, DID_ERROR);
- scsi_done(cmd);
- goto out;
- }
-
- hba->req_abort_count = 0;
-
- err = ufshcd_hold(hba, true);
- if (err) {
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
- (hba->clk_gating.state != CLKS_ON));
-
- lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
- lrbp->cmd = cmd;
- lrbp->sense_bufflen = UFS_SENSE_SIZE;
- lrbp->sense_buffer = cmd->sense_buffer;
- lrbp->task_tag = tag;
- lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
- lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
-
- ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
-
- lrbp->req_abort_skip = false;
-
- ufshpb_prep(hba, lrbp);
-
- ufshcd_comp_scsi_upiu(hba, lrbp);
-
- err = ufshcd_map_sg(hba, lrbp);
- if (err) {
- lrbp->cmd = NULL;
- ufshcd_release(hba);
- goto out;
- }
-
- ufshcd_send_command(hba, tag);
-out:
- up_read(&hba->clk_scaling_lock);
-
- if (ufs_trigger_eh()) {
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_schedule_eh_work(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-
- return err;
-}
-
-static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
-{
- lrbp->cmd = NULL;
- lrbp->sense_bufflen = 0;
- lrbp->sense_buffer = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = 0; /* device management cmd is not specific to any LUN */
- lrbp->intr_cmd = true; /* No interrupt aggregation */
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
- hba->dev_cmd.type = cmd_type;
-
- return ufshcd_compose_devman_upiu(hba, lrbp);
-}
-
-static int
-ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
-{
- int err = 0;
- unsigned long flags;
- u32 mask = 1 << tag;
-
- /* clear outstanding transaction before retry */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_utrl_clear(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /*
- * wait for h/w to clear corresponding bit in door-bell.
- * max. wait is 1 sec.
- */
- err = ufshcd_wait_for_register(hba,
- REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000);
-
- return err;
-}
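A concrete instance of the wait above, assuming ufshcd_wait_for_register() polls until the masked register value equals the masked expected value:

	/* tag == 5: mask == 0x20 and the expected value is ~0x20, so the
	 * poll succeeds once (REG_UTP_TRANSFER_REQ_DOOR_BELL & 0x20) == 0,
	 * i.e. once the controller has cleared the doorbell bit for tag 5.
	 */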
-
-static int
-ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
-
- /* Get the UPIU response */
- query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
- UPIU_RSP_CODE_OFFSET;
- return query_res->response;
-}
-
-/**
- * ufshcd_dev_cmd_completion() - handles device management command responses
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- */
-static int
-ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- int resp;
- int err = 0;
-
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
- resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
-
- switch (resp) {
- case UPIU_TRANSACTION_NOP_IN:
- if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
- err = -EINVAL;
- dev_err(hba->dev, "%s: unexpected response %x\n",
- __func__, resp);
- }
- break;
- case UPIU_TRANSACTION_QUERY_RSP:
- err = ufshcd_check_query_response(hba, lrbp);
- if (!err)
- err = ufshcd_copy_query_response(hba, lrbp);
- break;
- case UPIU_TRANSACTION_REJECT_UPIU:
- /* TODO: handle Reject UPIU Response */
- err = -EPERM;
- dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
- __func__);
- break;
- default:
- err = -EINVAL;
- dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
- __func__, resp);
- break;
- }
-
- return err;
-}
-
-static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp, int max_timeout)
-{
- int err = 0;
- unsigned long time_left;
- unsigned long flags;
-
- time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
- msecs_to_jiffies(max_timeout));
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->dev_cmd.complete = NULL;
- if (likely(time_left)) {
- err = ufshcd_get_tr_ocs(lrbp);
- if (!err)
- err = ufshcd_dev_cmd_completion(hba, lrbp);
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (!time_left) {
- err = -ETIMEDOUT;
- dev_dbg(hba->dev, "%s: dev_cmd request timed out, tag %d\n",
- __func__, lrbp->task_tag);
- if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
- /* successfully cleared the command, retry if needed */
- err = -EAGAIN;
- /*
- * in case of an error, after clearing the doorbell we also
- * need to clear the corresponding bit in hba->outstanding_reqs
- */
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- __clear_bit(lrbp->task_tag, &hba->outstanding_reqs);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
- }
-
- return err;
-}
-
-/**
- * ufshcd_exec_dev_cmd - API for sending device management requests
- * @hba: UFS hba
- * @cmd_type: specifies the type (NOP, Query...)
- * @timeout: timeout in milliseconds
- *
- * NOTE: Since there is only one available tag for device management commands,
- * it is expected that the caller holds the hba->dev_cmd.lock mutex.
- */
-static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
- enum dev_cmd_type cmd_type, int timeout)
-{
- struct request_queue *q = hba->cmd_queue;
- DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
- struct ufshcd_lrb *lrbp;
- int err;
- int tag;
-
- down_read(&hba->clk_scaling_lock);
-
- /*
- * Get a free slot. blk_mq_alloc_request() may sleep until a tag
- * becomes available, but the maximum wait time is bounded by the
- * SCSI request timeout.
- */
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
- /* Set the timeout such that the SCSI error handler is not activated. */
- req->timeout = msecs_to_jiffies(2 * timeout);
- blk_mq_start_request(req);
-
- lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
- err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
- if (unlikely(err))
- goto out;
-
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
- ufshcd_send_command(hba, tag);
- err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
-
-out:
- blk_mq_free_request(req);
-out_unlock:
- up_read(&hba->clk_scaling_lock);
- return err;
-}
-
-/**
- * ufshcd_init_query() - init the query response and request parameters
- * @hba: per-adapter instance
- * @request: address of the request pointer to be initialized
- * @response: address of the response pointer to be initialized
- * @opcode: operation to perform
- * @idn: flag idn to access
- * @index: LU number to access
- * @selector: query/flag/descriptor further identification
- */
-static inline void ufshcd_init_query(struct ufs_hba *hba,
- struct ufs_query_req **request, struct ufs_query_res **response,
- enum query_opcode opcode, u8 idn, u8 index, u8 selector)
-{
- *request = &hba->dev_cmd.query.request;
- *response = &hba->dev_cmd.query.response;
- memset(*request, 0, sizeof(struct ufs_query_req));
- memset(*response, 0, sizeof(struct ufs_query_res));
- (*request)->upiu_req.opcode = opcode;
- (*request)->upiu_req.idn = idn;
- (*request)->upiu_req.index = index;
- (*request)->upiu_req.selector = selector;
-}
-
-static int ufshcd_query_flag_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
-{
- int ret;
- int retries;
-
- for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
- ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
- if (ret)
- dev_dbg(hba->dev,
- "%s: failed with error %d, retries %d\n",
- __func__, ret, retries);
- else
- break;
- }
-
- if (ret)
- dev_err(hba->dev,
- "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
- __func__, opcode, idn, ret, retries);
- return ret;
-}
-
-/**
- * ufshcd_query_flag() - API function for sending flag query requests
- * @hba: per-adapter instance
- * @opcode: flag query to perform
- * @idn: flag idn to access
- * @index: flag index to access
- * @flag_res: the flag value after the query request completes
- *
- * Returns 0 for success, non-zero in case of failure
- */
-int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, u8 index, bool *flag_res)
-{
- struct ufs_query_req *request = NULL;
- struct ufs_query_res *response = NULL;
- int err, selector = 0;
- int timeout = QUERY_REQ_TIMEOUT;
-
- BUG_ON(!hba);
-
- ufshcd_hold(hba, false);
- mutex_lock(&hba->dev_cmd.lock);
- ufshcd_init_query(hba, &request, &response, opcode, idn, index,
- selector);
-
- switch (opcode) {
- case UPIU_QUERY_OPCODE_SET_FLAG:
- case UPIU_QUERY_OPCODE_CLEAR_FLAG:
- case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
- break;
- case UPIU_QUERY_OPCODE_READ_FLAG:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- if (!flag_res) {
- /* No dummy reads */
- dev_err(hba->dev, "%s: Invalid argument for read request\n",
- __func__);
- err = -EINVAL;
- goto out_unlock;
- }
- break;
- default:
- dev_err(hba->dev,
- "%s: Expected query flag opcode but got = %d\n",
- __func__, opcode);
- err = -EINVAL;
- goto out_unlock;
- }
-
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
-
- if (err) {
- dev_err(hba->dev,
- "%s: Sending flag query for idn %d failed, err = %d\n",
- __func__, idn, err);
- goto out_unlock;
- }
-
- if (flag_res)
- *flag_res = (be32_to_cpu(response->upiu_res.value) &
- MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
-
-out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
- return err;
-}
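A usage sketch for reading a boolean flag (it mirrors the fDeviceInit polling later in this file; error handling trimmed):

	bool flag_res = false;
	int err;

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
	if (!err)
		dev_info(hba->dev, "fDeviceInit = %d\n", flag_res);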
-
-/**
- * ufshcd_query_attr - API function for sending attribute requests
- * @hba: per-adapter instance
- * @opcode: attribute opcode
- * @idn: attribute idn to access
- * @index: index field
- * @selector: selector field
- * @attr_val: the attribute value after the query request completes
- *
- * Returns 0 for success, non-zero in case of failure
- */
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
-{
- struct ufs_query_req *request = NULL;
- struct ufs_query_res *response = NULL;
- int err;
-
- BUG_ON(!hba);
-
- if (!attr_val) {
- dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
- __func__, opcode);
- return -EINVAL;
- }
-
- ufshcd_hold(hba, false);
-
- mutex_lock(&hba->dev_cmd.lock);
- ufshcd_init_query(hba, &request, &response, opcode, idn, index,
- selector);
-
- switch (opcode) {
- case UPIU_QUERY_OPCODE_WRITE_ATTR:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
- request->upiu_req.value = cpu_to_be32(*attr_val);
- break;
- case UPIU_QUERY_OPCODE_READ_ATTR:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- break;
- default:
- dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
- __func__, opcode);
- err = -EINVAL;
- goto out_unlock;
- }
-
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
-
- if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
- __func__, opcode, idn, index, err);
- goto out_unlock;
- }
-
- *attr_val = be32_to_cpu(response->upiu_res.value);
-
-out_unlock:
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
- return err;
-}
-
-/**
- * ufshcd_query_attr_retry() - API function for sending query
- * attribute with retries
- * @hba: per-adapter instance
- * @opcode: attribute opcode
- * @idn: attribute idn to access
- * @index: index field
- * @selector: selector field
- * @attr_val: the attribute value after the query request
- * completes
- *
- * Returns 0 for success, non-zero in case of failure
- */
-int ufshcd_query_attr_retry(struct ufs_hba *hba,
- enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
- u32 *attr_val)
-{
- int ret = 0;
- u32 retries;
-
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- ret = ufshcd_query_attr(hba, opcode, idn, index,
- selector, attr_val);
- if (ret)
- dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
- __func__, ret, retries);
- else
- break;
- }
-
- if (ret)
- dev_err(hba->dev,
- "%s: query attribute, idn %d, failed with error %d after %d retires\n",
- __func__, idn, ret, QUERY_REQ_RETRIES);
- return ret;
-}
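For illustration, reading a single attribute with retries might look as follows (QUERY_ATTR_IDN_ACTIVE_ICC_LVL is assumed to be defined in ufs.h; substitute whichever attribute is actually needed):

	u32 icc_level = 0;
	int err;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
				      &icc_level);
	if (!err)
		dev_dbg(hba->dev, "bActiveICCLevel = %u\n", icc_level);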
-
-static int __ufshcd_query_descriptor(struct ufs_hba *hba,
- enum query_opcode opcode, enum desc_idn idn, u8 index,
- u8 selector, u8 *desc_buf, int *buf_len)
-{
- struct ufs_query_req *request = NULL;
- struct ufs_query_res *response = NULL;
- int err;
-
- BUG_ON(!hba);
-
- if (!desc_buf) {
- dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
- __func__, opcode);
- return -EINVAL;
- }
-
- if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
- dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
- __func__, *buf_len);
- return -EINVAL;
- }
-
- ufshcd_hold(hba, false);
-
- mutex_lock(&hba->dev_cmd.lock);
- ufshcd_init_query(hba, &request, &response, opcode, idn, index,
- selector);
- hba->dev_cmd.query.descriptor = desc_buf;
- request->upiu_req.length = cpu_to_be16(*buf_len);
-
- switch (opcode) {
- case UPIU_QUERY_OPCODE_WRITE_DESC:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
- break;
- case UPIU_QUERY_OPCODE_READ_DESC:
- request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
- break;
- default:
- dev_err(hba->dev,
- "%s: Expected query descriptor opcode but got = 0x%.2x\n",
- __func__, opcode);
- err = -EINVAL;
- goto out_unlock;
- }
-
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
-
- if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
- __func__, opcode, idn, index, err);
- goto out_unlock;
- }
-
- *buf_len = be16_to_cpu(response->upiu_res.length);
-
-out_unlock:
- hba->dev_cmd.query.descriptor = NULL;
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
- return err;
-}
-
-/**
- * ufshcd_query_descriptor_retry - API function for sending descriptor requests
- * @hba: per-adapter instance
- * @opcode: attribute opcode
- * @idn: attribute idn to access
- * @index: index field
- * @selector: selector field
- * @desc_buf: the buffer that contains the descriptor
- * @buf_len: length parameter passed to the device
- *
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
- */
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
- enum query_opcode opcode,
- enum desc_idn idn, u8 index,
- u8 selector,
- u8 *desc_buf, int *buf_len)
-{
- int err;
- int retries;
-
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- err = __ufshcd_query_descriptor(hba, opcode, idn, index,
- selector, desc_buf, buf_len);
- if (!err || err == -EINVAL)
- break;
- }
-
- return err;
-}
-
-/**
- * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_len: mapped desc length (out)
- */
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_len)
-{
- if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
- desc_id == QUERY_DESC_IDN_RFU_1)
- *desc_len = 0;
- else
- *desc_len = hba->desc_size[desc_id];
-}
-EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
-
-static void ufshcd_update_desc_length(struct ufs_hba *hba,
- enum desc_idn desc_id, int desc_index,
- unsigned char desc_len)
-{
- if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
- desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
- /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
- * than the RPMB unit descriptor. Both share the same desc_idn,
- * so to cover both with one length, we choose the normal unit
- * descriptor length, selected here by desc_index.
- */
- hba->desc_size[desc_id] = desc_len;
-}
-
-/**
- * ufshcd_read_desc_param - read the specified descriptor parameter
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_index: descriptor index
- * @param_offset: offset of the parameter to read
- * @param_read_buf: pointer to buffer where parameter would be read
- * @param_size: sizeof(param_read_buf)
- *
- * Return 0 in case of success, non-zero otherwise
- */
-int ufshcd_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- u8 param_offset,
- u8 *param_read_buf,
- u8 param_size)
-{
- int ret;
- u8 *desc_buf;
- int buff_len;
- bool is_kmalloc = true;
-
- /* Safety check */
- if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
- return -EINVAL;
-
- /* Get the length of descriptor */
- ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!buff_len) {
- dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
- return -EINVAL;
- }
-
- if (param_offset >= buff_len) {
- dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
- __func__, param_offset, desc_id, buff_len);
- return -EINVAL;
- }
-
- /* Check whether we need temp memory */
- if (param_offset != 0 || param_size < buff_len) {
- desc_buf = kzalloc(buff_len, GFP_KERNEL);
- if (!desc_buf)
- return -ENOMEM;
- } else {
- desc_buf = param_read_buf;
- is_kmalloc = false;
- }
-
- /* Request for full descriptor */
- ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0,
- desc_buf, &buff_len);
-
- if (ret) {
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
- __func__, desc_id, desc_index, param_offset, ret);
- goto out;
- }
-
- /* Sanity check */
- if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
- dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
- __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
- ret = -EINVAL;
- goto out;
- }
-
- /* Update descriptor length */
- buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
- ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
-
- if (is_kmalloc) {
- /* Make sure we don't copy more data than available */
- if (param_offset >= buff_len)
- ret = -EINVAL;
- else
- memcpy(param_read_buf, &desc_buf[param_offset],
- min_t(u32, param_size, buff_len - param_offset));
- }
-out:
- if (is_kmalloc)
- kfree(desc_buf);
- return ret;
-}
-
-/**
- * struct uc_string_id - unicode string
- *
- * @len: size of this descriptor, including this header
- * @type: descriptor type
- * @uc: unicode string character
- */
-struct uc_string_id {
- u8 len;
- u8 type;
- wchar_t uc[];
-} __packed;
-
-/* replace non-printable or non-ASCII characters with spaces */
-static inline char ufshcd_remove_non_printable(u8 ch)
-{
- return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
-}
-
-/**
- * ufshcd_read_string_desc - read string descriptor
- * @hba: pointer to adapter instance
- * @desc_index: descriptor index
- * @buf: pointer to buffer where descriptor would be read,
- * the caller should free the memory.
- * @ascii: if true convert from unicode to ascii characters
- * null terminated string.
- *
- * Return:
- * * string size on success.
- * * -ENOMEM: on allocation failure
- * * -EINVAL: on a wrong parameter
- */
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii)
-{
- struct uc_string_id *uc_str;
- u8 *str;
- int ret;
-
- if (!buf)
- return -EINVAL;
-
- uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
- if (!uc_str)
- return -ENOMEM;
-
- ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
- (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
- if (ret < 0) {
- dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
- QUERY_REQ_RETRIES, ret);
- str = NULL;
- goto out;
- }
-
- if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
- dev_dbg(hba->dev, "String Desc is of zero length\n");
- str = NULL;
- ret = 0;
- goto out;
- }
-
- if (ascii) {
- ssize_t ascii_len;
- int i;
- /* remove the header and divide by 2 to go from UTF-16 to UTF-8 */
- ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
- str = kzalloc(ascii_len, GFP_KERNEL);
- if (!str) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * the descriptor contains the string in UTF-16 format;
- * convert it to UTF-8 so that it can be displayed
- */
- ret = utf16s_to_utf8s(uc_str->uc,
- uc_str->len - QUERY_DESC_HDR_SIZE,
- UTF16_BIG_ENDIAN, str, ascii_len);
-
- /* replace non-printable or non-ASCII characters with spaces */
- for (i = 0; i < ret; i++)
- str[i] = ufshcd_remove_non_printable(str[i]);
-
- str[ret++] = '\0';
-
- } else {
- str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
- if (!str) {
- ret = -ENOMEM;
- goto out;
- }
- ret = uc_str->len;
- }
-out:
- *buf = str;
- kfree(uc_str);
- return ret;
-}
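A caller sketch (model_index is hypothetical; a real caller would first read it from the device descriptor):

	u8 *model = NULL;
	int ret;

	ret = ufshcd_read_string_desc(hba, model_index, &model, true);
	if (ret > 0)
		dev_info(hba->dev, "product name: %s\n", model);
	kfree(model);	/* the caller owns, and must free, the buffer */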
-
-/**
- * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
- * @hba: Pointer to adapter instance
- * @lun: lun id
- * @param_offset: offset of the parameter to read
- * @param_read_buf: pointer to buffer where parameter would be read
- * @param_size: sizeof(param_read_buf)
- *
- * Return 0 in case of success, non-zero otherwise
- */
-static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
- int lun,
- enum unit_desc_param param_offset,
- u8 *param_read_buf,
- u32 param_size)
-{
- /*
- * Unit descriptors are only available for general purpose LUs (LUN id
- * from 0 to 7) and the RPMB well-known LU.
- */
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
- return -EOPNOTSUPP;
-
- return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
- param_offset, param_read_buf, param_size);
-}
-
-static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
-{
- int err = 0;
- u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
-
- if (hba->dev_info.wspecversion >= 0x300) {
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
- &gating_wait);
- if (err)
- dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
- err, gating_wait);
-
- if (gating_wait == 0) {
- gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
- dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
- gating_wait);
- }
-
- hba->dev_info.clk_gating_wait_us = gating_wait;
- }
-
- return err;
-}
-
-/**
- * ufshcd_memory_alloc - allocate memory for host memory space data structures
- * @hba: per adapter instance
- *
- * 1. Allocate DMA memory for Command Descriptor array
- * Each command descriptor consists of a Command UPIU, a Response UPIU and a PRDT
- * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
- * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
- * (UTMRDL)
- * 4. Allocate memory for local reference block(lrb).
- *
- * Returns 0 for success, non-zero in case of failure
- */
-static int ufshcd_memory_alloc(struct ufs_hba *hba)
-{
- size_t utmrdl_size, utrdl_size, ucdl_size;
-
- /* Allocate memory for UTP command descriptors */
- ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
- ucdl_size,
- &hba->ucdl_dma_addr,
- GFP_KERNEL);
-
- /*
- * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
- * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is,
- * then it is aligned to 128 bytes as well.
- */
- if (!hba->ucdl_base_addr ||
- WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(hba->dev,
- "Command Descriptor Memory allocation failed\n");
- goto out;
- }
-
- /*
- * Allocate memory for UTP Transfer descriptors
- * UFSHCI requires 1024 byte alignment of UTRD
- */
- utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
- utrdl_size,
- &hba->utrdl_dma_addr,
- GFP_KERNEL);
- if (!hba->utrdl_base_addr ||
- WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(hba->dev,
- "Transfer Descriptor Memory allocation failed\n");
- goto out;
- }
-
- /*
- * Allocate memory for UTP Task Management descriptors
- * UFSHCI requires 1024 byte alignment of UTMRD
- */
- utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
- utmrdl_size,
- &hba->utmrdl_dma_addr,
- GFP_KERNEL);
- if (!hba->utmrdl_base_addr ||
- WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(hba->dev,
- "Task Management Descriptor Memory allocation failed\n");
- goto out;
- }
-
- /* Allocate memory for local reference block */
- hba->lrb = devm_kcalloc(hba->dev,
- hba->nutrs, sizeof(struct ufshcd_lrb),
- GFP_KERNEL);
- if (!hba->lrb) {
- dev_err(hba->dev, "LRB Memory allocation failed\n");
- goto out;
- }
- return 0;
-out:
- return -ENOMEM;
-}
-
-/**
- * ufshcd_host_memory_configure - configure local reference block with
- * memory offsets
- * @hba: per adapter instance
- *
- * Configure Host memory space
- * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
- * address.
- * 2. Update each UTRD with Response UPIU offset, Response UPIU length
- * and PRDT offset.
- * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
- * into local reference block.
- */
-static void ufshcd_host_memory_configure(struct ufs_hba *hba)
-{
- struct utp_transfer_req_desc *utrdlp;
- dma_addr_t cmd_desc_dma_addr;
- dma_addr_t cmd_desc_element_addr;
- u16 response_offset;
- u16 prdt_offset;
- int cmd_desc_size;
- int i;
-
- utrdlp = hba->utrdl_base_addr;
-
- response_offset =
- offsetof(struct utp_transfer_cmd_desc, response_upiu);
- prdt_offset =
- offsetof(struct utp_transfer_cmd_desc, prd_table);
-
- cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
- cmd_desc_dma_addr = hba->ucdl_dma_addr;
-
- for (i = 0; i < hba->nutrs; i++) {
- /* Configure UTRD with command descriptor base address */
- cmd_desc_element_addr =
- (cmd_desc_dma_addr + (cmd_desc_size * i));
- utrdlp[i].command_desc_base_addr_lo =
- cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
- utrdlp[i].command_desc_base_addr_hi =
- cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
-
- /* Response upiu and prdt offset should be in double words */
- if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
- utrdlp[i].response_upiu_offset =
- cpu_to_le16(response_offset);
- utrdlp[i].prd_table_offset =
- cpu_to_le16(prdt_offset);
- utrdlp[i].response_upiu_length =
- cpu_to_le16(ALIGNED_UPIU_SIZE);
- } else {
- utrdlp[i].response_upiu_offset =
- cpu_to_le16(response_offset >> 2);
- utrdlp[i].prd_table_offset =
- cpu_to_le16(prdt_offset >> 2);
- utrdlp[i].response_upiu_length =
- cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
- }
-
- ufshcd_init_lrb(hba, &hba->lrb[i], i);
- }
-}
-
-/**
- * ufshcd_dme_link_startup - Notify Unipro to perform link startup
- * @hba: per adapter instance
- *
- * UIC_CMD_DME_LINK_STARTUP command must be issued to the UniPro layer
- * in order to initiate the UniPro link startup procedure.
- * Once the UniPro link is up, the device connected to the controller
- * is detected.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_link_startup(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_dbg(hba->dev,
- "dme-link-startup: error code %d\n", ret);
- return ret;
-}
-
-/**
- * ufshcd_dme_reset - UIC command for DME_RESET
- * @hba: per adapter instance
- *
- * DME_RESET command is issued in order to reset the UniPro stack.
- * This function now deals with cold reset.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_reset(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_RESET;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
-
- return ret;
-}
-
-int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
- int agreed_gear,
- int adapt_val)
-{
- int ret;
-
- if (agreed_gear != UFS_HS_G4)
- adapt_val = PA_NO_ADAPT;
-
- ret = ufshcd_dme_set(hba,
- UIC_ARG_MIB(PA_TXHSADAPTTYPE),
- adapt_val);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
-
-/**
- * ufshcd_dme_enable - UIC command for DME_ENABLE
- * @hba: per adapter instance
- *
- * DME_ENABLE command is issued in order to enable the UniPro stack.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_dme_enable(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- uic_cmd.command = UIC_CMD_DME_ENABLE;
-
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev,
- "dme-enable: error code %d\n", ret);
-
- return ret;
-}
-
-static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
-{
- #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
- unsigned long min_sleep_time_us;
-
- if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
- return;
-
- /*
- * last_dme_cmd_tstamp will be 0 only on the first call to
- * this function
- */
- if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
- min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
- } else {
- unsigned long delta =
- (unsigned long) ktime_to_us(
- ktime_sub(ktime_get(),
- hba->last_dme_cmd_tstamp));
-
- if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
- min_sleep_time_us =
- MIN_DELAY_BEFORE_DME_CMDS_US - delta;
- else
- return; /* no more delay required */
- }
-
- /* allow sleep for extra 50us if needed */
- usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
-}
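A worked example of the delay arithmetic above:

	/* With MIN_DELAY_BEFORE_DME_CMDS_US == 1000:
	 *   400 us elapsed since the last DME command -> usleep_range(600, 650)
	 *   >= 1000 us elapsed                        -> return without sleeping
	 */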
-
-/**
- * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
- * @hba: per adapter instance
- * @attr_sel: uic command argument1
- * @attr_set: attribute set type as uic command argument2
- * @mib_val: setting value as uic command argument3
- * @peer: indicate whether peer or local
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
- u8 attr_set, u32 mib_val, u8 peer)
-{
- struct uic_command uic_cmd = {0};
- static const char *const action[] = {
- "dme-set",
- "dme-peer-set"
- };
- const char *set = action[!!peer];
- int ret;
- int retries = UFS_UIC_COMMAND_RETRIES;
-
- uic_cmd.command = peer ?
- UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
- uic_cmd.argument1 = attr_sel;
- uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
- uic_cmd.argument3 = mib_val;
-
- do {
- /* for peer attributes we retry upon failure */
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
- } while (ret && peer && --retries);
-
- if (ret)
- dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val,
- UFS_UIC_COMMAND_RETRIES - retries);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
-
-/**
- * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
- * @hba: per adapter instance
- * @attr_sel: uic command argument1
- * @mib_val: the value of the attribute as returned by the UIC command
- * @peer: indicate whether peer or local
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
- u32 *mib_val, u8 peer)
-{
- struct uic_command uic_cmd = {0};
- static const char *const action[] = {
- "dme-get",
- "dme-peer-get"
- };
- const char *get = action[!!peer];
- int ret;
- int retries = UFS_UIC_COMMAND_RETRIES;
- struct ufs_pa_layer_attr orig_pwr_info;
- struct ufs_pa_layer_attr temp_pwr_info;
- bool pwr_mode_change = false;
-
- if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
- orig_pwr_info = hba->pwr_info;
- temp_pwr_info = orig_pwr_info;
-
- if (orig_pwr_info.pwr_tx == FAST_MODE ||
- orig_pwr_info.pwr_rx == FAST_MODE) {
- temp_pwr_info.pwr_tx = FASTAUTO_MODE;
- temp_pwr_info.pwr_rx = FASTAUTO_MODE;
- pwr_mode_change = true;
- } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
- orig_pwr_info.pwr_rx == SLOW_MODE) {
- temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
- temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
- pwr_mode_change = true;
- }
- if (pwr_mode_change) {
- ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
- if (ret)
- goto out;
- }
- }
-
- uic_cmd.command = peer ?
- UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
- uic_cmd.argument1 = attr_sel;
-
- do {
- /* for peer attributes we retry upon failure */
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
- get, UIC_GET_ATTR_ID(attr_sel), ret);
- } while (ret && peer && --retries);
-
- if (ret)
- dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
- get, UIC_GET_ATTR_ID(attr_sel),
- UFS_UIC_COMMAND_RETRIES - retries);
-
- if (mib_val && !ret)
- *mib_val = uic_cmd.argument3;
-
- if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
- && pwr_mode_change)
- ufshcd_change_power_mode(hba, &orig_pwr_info);
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
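Callers mostly go through thin inline wrappers instead of calling ufshcd_dme_set_attr()/ufshcd_dme_get_attr() directly. They look roughly like this (sketched from memory of ufshcd.h; treat ATTR_SET_NOR, DME_LOCAL and DME_PEER as assumptions):

	static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
	{
		/* local DME_SET with the "normal" attribute-set type */
		return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
					   mib_val, DME_LOCAL);
	}

	static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, u32 attr_sel,
					      u32 *mib_val)
	{
		/* DME_PEER_GET against the device end of the link */
		return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
	}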
-
-/**
- * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
- * state) and waits for them to take effect.
- *
- * @hba: per adapter instance
- * @cmd: UIC command to execute
- *
- * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
- * DME_HIBERNATE_EXIT take some time to take effect on both the host and
- * device UniPro links, so their final completion is indicated by dedicated
- * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
- * addition to the normal UIC command completion status (UCCS). This
- * function only returns after the relevant status bits indicate completion.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
-{
- DECLARE_COMPLETION_ONSTACK(uic_async_done);
- unsigned long flags;
- u8 status;
- int ret;
- bool reenable_intr = false;
-
- mutex_lock(&hba->uic_cmd_mutex);
- ufshcd_add_delay_before_dme_cmd(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ufshcd_is_link_broken(hba)) {
- ret = -ENOLINK;
- goto out_unlock;
- }
- hba->uic_async_done = &uic_async_done;
- if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
- ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
- /*
- * Make sure UIC command completion interrupt is disabled before
- * issuing UIC command.
- */
- wmb();
- reenable_intr = true;
- }
- ret = __ufshcd_send_uic_cmd(hba, cmd, false);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (ret) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
- cmd->command, cmd->argument3, ret);
- goto out;
- }
-
- if (!wait_for_completion_timeout(hba->uic_async_done,
- msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
- cmd->command, cmd->argument3);
-
- if (!cmd->cmd_active) {
- dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
- __func__);
- goto check_upmcrs;
- }
-
- ret = -ETIMEDOUT;
- goto out;
- }
-
-check_upmcrs:
- status = ufshcd_get_upmcrs(hba);
- if (status != PWR_LOCAL) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
- cmd->command, status);
- ret = (status != PWR_OK) ? status : -1;
- }
-out:
- if (ret) {
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_evt_hist(hba);
- }
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->active_uic_cmd = NULL;
- hba->uic_async_done = NULL;
- if (reenable_intr)
- ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
- if (ret) {
- ufshcd_set_link_broken(hba);
- ufshcd_schedule_eh_work(hba);
- }
-out_unlock:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- mutex_unlock(&hba->uic_cmd_mutex);
-
- return ret;
-}
-
-/**
- * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
- * using DME_SET primitives.
- * @hba: per adapter instance
- * @mode: power mode value
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
-{
- struct uic_command uic_cmd = {0};
- int ret;
-
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
- ret = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
- if (ret) {
- dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
- __func__, ret);
- goto out;
- }
- }
-
- uic_cmd.command = UIC_CMD_DME_SET;
- uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
- uic_cmd.argument3 = mode;
- ufshcd_hold(hba, false);
- ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- ufshcd_release(hba);
-
-out:
- return ret;
-}
-
-int ufshcd_link_recovery(struct ufs_hba *hba)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* Reset the attached device */
- ufshcd_device_reset(hba);
-
- ret = ufshcd_host_reset_and_restore(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ret)
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- ufshcd_clear_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (ret)
- dev_err(hba->dev, "%s: link recovery failed, err %d\n",
- __func__, ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
-
-int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
-{
- int ret;
- struct uic_command uic_cmd = {0};
- ktime_t start = ktime_get();
-
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
-
- uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
- ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
-
- if (ret)
- dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
- __func__, ret);
- else
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
- POST_CHANGE);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
-
-int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
-{
- struct uic_command uic_cmd = {0};
- int ret;
- ktime_t start = ktime_get();
-
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
-
- uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
- ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
-
- if (ret) {
- dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
- __func__, ret);
- } else {
- ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
- POST_CHANGE);
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
- hba->ufs_stats.hibern8_exit_cnt++;
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
-
-void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
-{
- unsigned long flags;
- bool update = false;
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ahit != ahit) {
- hba->ahit = ahit;
- update = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (update &&
- !pm_runtime_suspended(&hba->sdev_ufs_device->sdev_gendev)) {
- ufshcd_rpm_get_sync(hba);
- ufshcd_hold(hba, false);
- ufshcd_auto_hibern8_enable(hba);
- ufshcd_release(hba);
- ufshcd_rpm_put_sync(hba);
- }
-}
-EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
-
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
-{
- unsigned long flags;
-
- if (!ufshcd_is_auto_hibern8_supported(hba))
- return;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
-/**
- * ufshcd_init_pwr_info - setting the POR (power on reset)
- * values in hba power info
- * @hba: per-adapter instance
- */
-static void ufshcd_init_pwr_info(struct ufs_hba *hba)
-{
- hba->pwr_info.gear_rx = UFS_PWM_G1;
- hba->pwr_info.gear_tx = UFS_PWM_G1;
- hba->pwr_info.lane_rx = 1;
- hba->pwr_info.lane_tx = 1;
- hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
- hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
- hba->pwr_info.hs_rate = 0;
-}
-
-/**
- * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
- * @hba: per-adapter instance
- */
-static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
-{
- struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
-
- if (hba->max_pwr_info.is_valid)
- return 0;
-
- pwr_info->pwr_tx = FAST_MODE;
- pwr_info->pwr_rx = FAST_MODE;
- pwr_info->hs_rate = PA_HS_MODE_B;
-
- /* Get the connected lane count */
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
- &pwr_info->lane_rx);
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
- &pwr_info->lane_tx);
-
- if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
- dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
- __func__,
- pwr_info->lane_rx,
- pwr_info->lane_tx);
- return -EINVAL;
- }
-
- /*
- * First, get the maximum gear for the HS speed mode. A zero value
- * means there is no HS gear capability; in that case, fall back to
- * the maximum gear for the PWM speed mode.
- */
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
- if (!pwr_info->gear_rx) {
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
- &pwr_info->gear_rx);
- if (!pwr_info->gear_rx) {
- dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
- __func__, pwr_info->gear_rx);
- return -EINVAL;
- }
- pwr_info->pwr_rx = SLOW_MODE;
- }
-
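- /*
- * Note: the peer's maximum RX gear bounds the host's TX gear,
- * which is why PA_MAXRXHSGEAR (not a TX attribute) is read here.
- */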
- ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
- &pwr_info->gear_tx);
- if (!pwr_info->gear_tx) {
- ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
- &pwr_info->gear_tx);
- if (!pwr_info->gear_tx) {
- dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
- __func__, pwr_info->gear_tx);
- return -EINVAL;
- }
- pwr_info->pwr_tx = SLOW_MODE;
- }
-
- hba->max_pwr_info.is_valid = true;
- return 0;
-}
-
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_mode)
-{
- int ret;
-
- /* if already configured to the requested pwr_mode */
- if (!hba->force_pmc &&
- pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
- pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
- pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
- pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
- pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
- pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
- pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
- dev_dbg(hba->dev, "%s: power already configured\n", __func__);
- return 0;
- }
-
- /*
- * Configure attributes for power mode change with below.
- * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
- * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
- * - PA_HSSERIES
- */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
- pwr_mode->lane_rx);
- if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
- pwr_mode->pwr_rx == FAST_MODE)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
- else
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
- pwr_mode->lane_tx);
- if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
- pwr_mode->pwr_tx == FAST_MODE)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
- else
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
-
- if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
- pwr_mode->pwr_tx == FASTAUTO_MODE ||
- pwr_mode->pwr_rx == FAST_MODE ||
- pwr_mode->pwr_tx == FAST_MODE)
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
- pwr_mode->hs_rate);
-
- if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
- DL_AFC0ReqTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
- DL_FC1ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
- DL_TC1ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
- DL_AFC1ReqTimeOutVal_Default);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
- DL_AFC0ReqTimeOutVal_Default);
- }
-
- ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
- | pwr_mode->pwr_tx);
-
- if (ret) {
- dev_err(hba->dev,
- "%s: power mode change failed %d\n", __func__, ret);
- } else {
- ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
- pwr_mode);
-
- memcpy(&hba->pwr_info, pwr_mode,
- sizeof(struct ufs_pa_layer_attr));
- }
-
- return ret;
-}
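A worked example of the mode byte composed above, assuming the usual UniPro encodings (FAST_MODE == 1, SLOW_MODE == 2, FASTAUTO_MODE == 4, SLOWAUTO_MODE == 5; see unipro.h):

	/* pwr_rx << 4 | pwr_tx:
	 *   FAST_MODE rx and tx:     (1 << 4) | 1 == 0x11
	 *   FASTAUTO_MODE rx and tx: (4 << 4) | 4 == 0x44
	 */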
-
-/**
- * ufshcd_config_pwr_mode - configure a new power mode
- * @hba: per-adapter instance
- * @desired_pwr_mode: desired power configuration
- */
-int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode)
-{
- struct ufs_pa_layer_attr final_params = { 0 };
- int ret;
-
- ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
- desired_pwr_mode, &final_params);
-
- if (ret)
- memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
-
- ret = ufshcd_change_power_mode(hba, &final_params);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
-
-/**
- * ufshcd_complete_dev_init() - checks device readiness
- * @hba: per-adapter instance
- *
- * Set fDeviceInit flag and poll until device toggles it.
- */
-static int ufshcd_complete_dev_init(struct ufs_hba *hba)
-{
- int err;
- bool flag_res = true;
- ktime_t timeout;
-
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
- if (err) {
- dev_err(hba->dev,
- "%s setting fDeviceInit flag failed with error %d\n",
- __func__, err);
- goto out;
- }
-
- /* Poll fDeviceInit flag to be cleared */
- timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
- do {
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
- if (!flag_res)
- break;
- usleep_range(5000, 10000);
- } while (ktime_before(ktime_get(), timeout));
-
- if (err) {
- dev_err(hba->dev,
- "%s reading fDeviceInit flag failed with error %d\n",
- __func__, err);
- } else if (flag_res) {
- dev_err(hba->dev,
- "%s fDeviceInit was not cleared by the device\n",
- __func__);
- err = -EBUSY;
- }
-out:
- return err;
-}
-
-/**
- * ufshcd_make_hba_operational - Make UFS controller operational
- * @hba: per adapter instance
- *
- * To bring UFS host controller to operational state,
- * 1. Enable required interrupts
- * 2. Configure interrupt aggregation
- * 3. Program UTRL and UTMRL base address
- * 4. Configure run-stop-registers
- *
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_make_hba_operational(struct ufs_hba *hba)
-{
- int err = 0;
- u32 reg;
-
- /* Enable required interrupts */
- ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
-
- /* Configure interrupt aggregation */
- if (ufshcd_is_intr_aggr_allowed(hba))
- ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
- else
- ufshcd_disable_intr_aggr(hba);
-
- /* Configure UTRL and UTMRL base address registers */
- ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
- REG_UTP_TRANSFER_REQ_LIST_BASE_L);
- ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
- REG_UTP_TRANSFER_REQ_LIST_BASE_H);
- ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
- REG_UTP_TASK_REQ_LIST_BASE_L);
- ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
- REG_UTP_TASK_REQ_LIST_BASE_H);
-
- /*
- * Make sure base address and interrupt setup are updated before
- * enabling the run/stop registers below.
- */
- wmb();
-
- /*
- * UCRDY, UTMRLDY and UTRLRDY bits must be 1
- */
- reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
- if (!(ufshcd_get_lists_status(reg))) {
- ufshcd_enable_run_stop_reg(hba);
- } else {
- dev_err(hba->dev,
- "Host controller not ready to process requests\n");
- err = -EIO;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
-
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-void ufshcd_hba_stop(struct ufs_hba *hba)
-{
- unsigned long flags;
- int err;
-
- /*
- * Obtain the host lock to prevent the controller from being disabled
- * while the UFS interrupt handler is active on another CPU.
- */
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
- CONTROLLER_ENABLE, CONTROLLER_DISABLE,
- 10, 1);
- if (err)
- dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
-}
-EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
-
-/**
- * ufshcd_hba_execute_hce - initialize the controller
- * @hba: per adapter instance
- *
- * The controller resets itself and the controller firmware initialization
- * sequence kicks off. When the controller is ready it will set
- * the Host Controller Enable bit to 1.
- *
- * Returns 0 on success, non-zero value on failure
- */
-static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
-{
- int retry_outer = 3;
- int retry_inner;
-
-start:
- if (!ufshcd_is_hba_active(hba))
- /* change controller state to "reset state" */
- ufshcd_hba_stop(hba);
-
- /* UniPro link is disabled at this point */
- ufshcd_set_link_off(hba);
-
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
-
- /* start controller initialization sequence */
- ufshcd_hba_start(hba);
-
- /*
- * To initialize a UFS host controller, the HCE bit must be set to 1.
- * During initialization the HCE bit value changes from 1->0->1.
- * When the host controller completes initialization it sets HCE back
- * to 1, and the same bit is read back to check whether initialization
- * has completed. Without this delay, the HCE = 1 written by the
- * previous instruction might be read back before the controller has
- * cleared it.
- * This delay can be changed based on the controller.
- */
- ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
-
- /* wait for the host controller to complete initialization */
- retry_inner = 50;
- while (ufshcd_is_hba_active(hba)) {
- if (retry_inner) {
- retry_inner--;
- } else {
- dev_err(hba->dev,
- "Controller enable failed\n");
- if (retry_outer) {
- retry_outer--;
- goto start;
- }
- return -EIO;
- }
- usleep_range(1000, 1100);
- }
-
- /* enable UIC related interrupts */
- ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
-
- ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
-
- return 0;
-}
-
-int ufshcd_hba_enable(struct ufs_hba *hba)
-{
- int ret;
-
- if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
- ufshcd_set_link_off(hba);
- ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
-
- /* enable UIC related interrupts */
- ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
- ret = ufshcd_dme_reset(hba);
- if (!ret) {
- ret = ufshcd_dme_enable(hba);
- if (!ret)
- ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
- if (ret)
- dev_err(hba->dev,
- "Host controller enable failed with non-hce\n");
- }
- } else {
- ret = ufshcd_hba_execute_hce(hba);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
-
-static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
-{
- int tx_lanes = 0, i, err = 0;
-
- if (!peer)
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
- &tx_lanes);
- else
- ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
- &tx_lanes);
- for (i = 0; i < tx_lanes; i++) {
- if (!peer)
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
- 0);
- else
- err = ufshcd_dme_peer_set(hba,
- UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
- 0);
- if (err) {
- dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
- __func__, peer, i, err);
- break;
- }
- }
-
- return err;
-}
-
-static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
-{
- return ufshcd_disable_tx_lcc(hba, true);
-}
-
-void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
-{
- struct ufs_event_hist *e;
-
- if (id >= UFS_EVT_CNT)
- return;
-
- e = &hba->ufs_stats.event[id];
- e->val[e->pos] = val;
- e->tstamp[e->pos] = ktime_get();
- e->cnt += 1;
- e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
-
- ufshcd_vops_event_notify(hba, id, &val);
-}
-EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
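-
-/*
- * Editor's sketch (illustrative, not part of the driver): the event
- * history above is a fixed-size ring buffer whose write index wraps by
- * modulo, while e->cnt keeps growing so overflow can be detected. The
- * same pattern on a hypothetical standalone structure:
- */
-struct sketch_ring {
- u32 val[UFS_EVENT_HIST_LENGTH];
- int pos; /* next slot to overwrite */
- u32 cnt; /* total events ever recorded */
-};
-
-static void sketch_ring_push(struct sketch_ring *r, u32 v)
-{
- r->val[r->pos] = v;
- r->cnt += 1;
- r->pos = (r->pos + 1) % UFS_EVENT_HIST_LENGTH;
-}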
-
-/**
- * ufshcd_link_startup - Initialize unipro link startup
- * @hba: per adapter instance
- *
- * Returns 0 for success, non-zero in case of failure
- */
-static int ufshcd_link_startup(struct ufs_hba *hba)
-{
- int ret;
- int retries = DME_LINKSTARTUP_RETRIES;
- bool link_startup_again = false;
-
- /*
- * If the UFS device isn't active then we will have to issue link startup
- * twice to make sure the device state moves to active.
- */
- if (!ufshcd_is_ufs_dev_active(hba))
- link_startup_again = true;
-
-link_startup:
- do {
- ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
-
- ret = ufshcd_dme_link_startup(hba);
-
- /* check if device is detected by inter-connect layer */
- if (!ret && !ufshcd_is_device_present(hba)) {
- ufshcd_update_evt_hist(hba,
- UFS_EVT_LINK_STARTUP_FAIL,
- 0);
- dev_err(hba->dev, "%s: Device not present\n", __func__);
- ret = -ENXIO;
- goto out;
- }
-
- /*
- * DME link lost indication is only received when link is up,
- * but we can't be sure if the link is up until link startup
- * succeeds. So reset the local Uni-Pro and try again.
- */
- if (ret && ufshcd_hba_enable(hba)) {
- ufshcd_update_evt_hist(hba,
- UFS_EVT_LINK_STARTUP_FAIL,
- (u32)ret);
- goto out;
- }
- } while (ret && retries--);
-
- if (ret) {
- /* failed to get the link up; give up */
- ufshcd_update_evt_hist(hba,
- UFS_EVT_LINK_STARTUP_FAIL,
- (u32)ret);
- goto out;
- }
-
- if (link_startup_again) {
- link_startup_again = false;
- retries = DME_LINKSTARTUP_RETRIES;
- goto link_startup;
- }
-
- /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
- ufshcd_init_pwr_info(hba);
- ufshcd_print_pwr_info(hba);
-
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
- ret = ufshcd_disable_device_tx_lcc(hba);
- if (ret)
- goto out;
- }
-
- /* Include any host controller configuration via UIC commands */
- ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
- if (ret)
- goto out;
-
- /* Clear UECPA in case a LINERESET happened during LINK_STARTUP */
- ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
- ret = ufshcd_make_hba_operational(hba);
-out:
- if (ret) {
- dev_err(hba->dev, "link startup failed %d\n", ret);
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_evt_hist(hba);
- }
- return ret;
-}
-
-/**
- * ufshcd_verify_dev_init() - Verify device initialization
- * @hba: per-adapter instance
- *
- * Send NOP OUT UPIU and wait for NOP IN response to check whether the
- * device Transport Protocol (UTP) layer is ready after a reset.
- * If the UTP layer at the device side is not initialized, it may
- * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
- * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
- */
-static int ufshcd_verify_dev_init(struct ufs_hba *hba)
-{
- int err = 0;
- int retries;
-
- ufshcd_hold(hba, false);
- mutex_lock(&hba->dev_cmd.lock);
- for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
- hba->nop_out_timeout);
-
- if (!err || err == -ETIMEDOUT)
- break;
-
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
- }
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
-
- if (err)
- dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
- return err;
-}
-
-/**
- * ufshcd_set_queue_depth - set lun queue depth
- * @sdev: pointer to SCSI device
- *
- * Read the bLUQueueDepth value and activate SCSI tagged command
- * queueing. For WLUNs, the queue depth is set to 1. For best-effort
- * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
- * number of requests the host can queue.
- */
-static void ufshcd_set_queue_depth(struct scsi_device *sdev)
-{
- int ret = 0;
- u8 lun_qdepth;
- struct ufs_hba *hba;
-
- hba = shost_priv(sdev->host);
-
- lun_qdepth = hba->nutrs;
- ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
-
- /* Some WLUNs don't support the unit descriptor */
- if (ret == -EOPNOTSUPP)
- lun_qdepth = 1;
- else if (!lun_qdepth)
- /* eventually, we can figure out the real queue depth */
- lun_qdepth = hba->nutrs;
- else
- lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
-
- dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
- __func__, lun_qdepth);
- scsi_change_queue_depth(sdev, lun_qdepth);
-}
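-
-/*
- * Editor's sketch (hypothetical helper, not in the original source):
- * the depth selection above as a pure function. -EOPNOTSUPP (no unit
- * descriptor, e.g. some WLUNs) maps to depth 1, bLUQueueDepth == 0
- * (best effort) maps to the host maximum, and anything else is
- * clamped to hba->nutrs.
- */
-static int sketch_pick_qdepth(int ret, u8 lun_qdepth, int nutrs)
-{
- if (ret == -EOPNOTSUPP)
- return 1;
- if (!lun_qdepth)
- return nutrs;
- return min_t(int, lun_qdepth, nutrs);
-}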
-
-/**
- * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
- * @hba: per-adapter instance
- * @lun: UFS device lun id
- * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
- *
- * Returns 0 in case of success and the write protect status is returned in
- * the @b_lu_write_protect parameter.
- * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
- * Returns -EINVAL in case of invalid parameters passed to this function.
- */
-static int ufshcd_get_lu_wp(struct ufs_hba *hba,
- u8 lun,
- u8 *b_lu_write_protect)
-{
- int ret;
-
- if (!b_lu_write_protect)
- ret = -EINVAL;
- /*
- * According to UFS device spec, RPMB LU can't be write
- * protected so skip reading bLUWriteProtect parameter for
- * it. For other W-LUs, UNIT DESCRIPTOR is not available.
- */
- else if (lun >= hba->dev_info.max_lu_supported)
- ret = -ENOTSUPP;
- else
- ret = ufshcd_read_unit_desc_param(hba,
- lun,
- UNIT_DESC_PARAM_LU_WR_PROTECT,
- b_lu_write_protect,
- sizeof(*b_lu_write_protect));
- return ret;
-}
-
-/**
- * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
- * status
- * @hba: per-adapter instance
- * @sdev: pointer to SCSI device
- *
- */
-static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
- struct scsi_device *sdev)
-{
- if (hba->dev_info.f_power_on_wp_en &&
- !hba->dev_info.is_lu_power_on_wp) {
- u8 b_lu_write_protect;
-
- if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
- &b_lu_write_protect) &&
- (b_lu_write_protect == UFS_LU_POWER_ON_WP))
- hba->dev_info.is_lu_power_on_wp = true;
- }
-}
-
-/**
- * ufshcd_setup_links - associate link b/w device wlun and other luns
- * @sdev: pointer to SCSI device
- * @hba: pointer to ufs hba
- */
-static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- struct device_link *link;
-
- /*
- * The device WLUN is the supplier and the rest of the LUNs are consumers.
- * This ensures that the device WLUN suspends after all other LUNs.
- */
- if (hba->sdev_ufs_device) {
- link = device_link_add(&sdev->sdev_gendev,
- &hba->sdev_ufs_device->sdev_gendev,
- DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
- if (!link) {
- dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
- dev_name(&hba->sdev_ufs_device->sdev_gendev));
- return;
- }
- hba->luns_avail--;
- /* Ignore REPORT_LUN wlun probing */
- if (hba->luns_avail == 1) {
- ufshcd_rpm_put(hba);
- return;
- }
- } else {
- /*
- * Device wlun is probed. The assumption is that WLUNs are
- * scanned before other LUNs.
- */
- hba->luns_avail--;
- }
-}
-
-/**
- * ufshcd_slave_alloc - handle initial SCSI device configurations
- * @sdev: pointer to SCSI device
- *
- * Always returns 0 (success).
- */
-static int ufshcd_slave_alloc(struct scsi_device *sdev)
-{
- struct ufs_hba *hba;
-
- hba = shost_priv(sdev->host);
-
- /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
- sdev->use_10_for_ms = 1;
-
- /* DBD field should be set to 1 in mode sense(10) */
- sdev->set_dbd_for_ms = 1;
-
- /* allow SCSI layer to restart the device in case of errors */
- sdev->allow_restart = 1;
-
- /* REPORT SUPPORTED OPERATION CODES is not supported */
- sdev->no_report_opcodes = 1;
-
- /* WRITE_SAME command is not supported */
- sdev->no_write_same = 1;
-
- ufshcd_set_queue_depth(sdev);
-
- ufshcd_get_lu_power_on_wp_status(hba, sdev);
-
- ufshcd_setup_links(hba, sdev);
-
- return 0;
-}
-
-/**
- * ufshcd_change_queue_depth - change queue depth
- * @sdev: pointer to SCSI device
- * @depth: required depth to set
- *
- * Change queue depth and make sure the max. limits are not crossed.
- */
-static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
-{
- struct ufs_hba *hba = shost_priv(sdev->host);
-
- if (depth > hba->nutrs)
- depth = hba->nutrs;
- return scsi_change_queue_depth(sdev, depth);
-}
-
-static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- /* skip well-known LU */
- if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
- !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
- return;
-
- ufshpb_destroy_lu(hba, sdev);
-}
-
-static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- /* skip well-known LU */
- if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
- !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
- return;
-
- ufshpb_init_hpb_lu(hba, sdev);
-}
-
-/**
- * ufshcd_slave_configure - adjust SCSI device configurations
- * @sdev: pointer to SCSI device
- */
-static int ufshcd_slave_configure(struct scsi_device *sdev)
-{
- struct ufs_hba *hba = shost_priv(sdev->host);
- struct request_queue *q = sdev->request_queue;
-
- ufshcd_hpb_configure(hba, sdev);
-
- blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
- if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
- blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
- /*
- * Block runtime PM until all consumers are added.
- * Refer to ufshcd_setup_links().
- */
- if (is_device_wlun(sdev))
- pm_runtime_get_noresume(&sdev->sdev_gendev);
- else if (ufshcd_is_rpm_autosuspend_allowed(hba))
- sdev->rpm_autosuspend = 1;
-
- ufshcd_crypto_register(hba, q);
-
- return 0;
-}
-
-/**
- * ufshcd_slave_destroy - remove SCSI device configurations
- * @sdev: pointer to SCSI device
- */
-static void ufshcd_slave_destroy(struct scsi_device *sdev)
-{
- struct ufs_hba *hba;
- unsigned long flags;
-
- hba = shost_priv(sdev->host);
-
- ufshcd_hpb_destroy(hba, sdev);
-
- /* Drop the reference as it won't be needed anymore */
- if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->sdev_ufs_device = NULL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- } else if (hba->sdev_ufs_device) {
- struct device *supplier = NULL;
-
- /* Ensure UFS Device WLUN exists and does not disappear */
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->sdev_ufs_device) {
- supplier = &hba->sdev_ufs_device->sdev_gendev;
- get_device(supplier);
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (supplier) {
- /*
- * If a LUN fails to probe (e.g. absent BOOT WLUN), the
- * device will not have been registered but can still
- * have a device link holding a reference to the device.
- */
- device_link_remove(&sdev->sdev_gendev, supplier);
- put_device(supplier);
- }
- }
-}
-
-/**
- * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
- * @lrbp: pointer to local reference block of completed command
- * @scsi_status: SCSI command status
- *
- * Returns a value based on the SCSI command status
- */
-static inline int
-ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
-{
- int result = 0;
-
- switch (scsi_status) {
- case SAM_STAT_CHECK_CONDITION:
- ufshcd_copy_sense_data(lrbp);
- fallthrough;
- case SAM_STAT_GOOD:
- result |= DID_OK << 16 | scsi_status;
- break;
- case SAM_STAT_TASK_SET_FULL:
- case SAM_STAT_BUSY:
- case SAM_STAT_TASK_ABORTED:
- ufshcd_copy_sense_data(lrbp);
- result |= scsi_status;
- break;
- default:
- result |= DID_ERROR << 16;
- break;
- } /* end of switch */
-
- return result;
-}
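-
-/*
- * Editor's note (illustrative helper, not from the original source):
- * the SCSI result word built above packs the host byte into bits 16-23
- * and the SCSI status into bits 0-7, hence compositions such as
- * "DID_OK << 16 | scsi_status".
- */
-static inline int sketch_compose_result(int host_byte, int scsi_status)
-{
- return (host_byte << 16) | scsi_status;
-}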
-
-/**
- * ufshcd_transfer_rsp_status - Get overall status of the response
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block of completed command
- *
- * Returns result of the command to notify SCSI midlayer
- */
-static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- int result = 0;
- int scsi_status;
- enum utp_ocs ocs;
-
- /* overall command status of utrd */
- ocs = ufshcd_get_tr_ocs(lrbp);
-
- if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
- if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
- MASK_RSP_UPIU_RESULT)
- ocs = OCS_SUCCESS;
- }
-
- switch (ocs) {
- case OCS_SUCCESS:
- result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
- switch (result) {
- case UPIU_TRANSACTION_RESPONSE:
- /*
- * get the response UPIU result to extract
- * the SCSI command status
- */
- result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
-
- /*
- * get the result based on SCSI status response
- * to notify the SCSI midlayer of the command status
- */
- scsi_status = result & MASK_SCSI_STATUS;
- result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
-
- /*
- * Currently we only support BKOPS exception events, so
- * exception events raised during power management callbacks
- * can be ignored. A BKOPS exception event is not expected in
- * the runtime suspend callback, since urgent BKOPS is allowed
- * there. During system suspend BKOPS is forcefully disabled
- * anyway, and if urgent BKOPS is needed it will be enabled on
- * system resume. A long-term solution could be to abort the
- * system suspend if the UFS device needs urgent BKOPS.
- */
- if (!hba->pm_op_in_progress &&
- !ufshcd_eh_in_progress(hba) &&
- ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
- /* Flushed in suspend */
- schedule_work(&hba->eeh_work);
-
- if (scsi_status == SAM_STAT_GOOD)
- ufshpb_rsp_upiu(hba, lrbp);
- break;
- case UPIU_TRANSACTION_REJECT_UPIU:
- /* TODO: handle Reject UPIU Response */
- result = DID_ERROR << 16;
- dev_err(hba->dev,
- "Reject UPIU not fully implemented\n");
- break;
- default:
- dev_err(hba->dev,
- "Unexpected request response code = %x\n",
- result);
- result = DID_ERROR << 16;
- break;
- }
- break;
- case OCS_ABORTED:
- result |= DID_ABORT << 16;
- break;
- case OCS_INVALID_COMMAND_STATUS:
- result |= DID_REQUEUE << 16;
- break;
- case OCS_INVALID_CMD_TABLE_ATTR:
- case OCS_INVALID_PRDT_ATTR:
- case OCS_MISMATCH_DATA_BUF_SIZE:
- case OCS_MISMATCH_RESP_UPIU_SIZE:
- case OCS_PEER_COMM_FAILURE:
- case OCS_FATAL_ERROR:
- case OCS_DEVICE_FATAL_ERROR:
- case OCS_INVALID_CRYPTO_CONFIG:
- case OCS_GENERAL_CRYPTO_ERROR:
- default:
- result |= DID_ERROR << 16;
- dev_err(hba->dev,
- "OCS error from controller = %x for tag %d\n",
- ocs, lrbp->task_tag);
- ufshcd_print_evt_hist(hba);
- ufshcd_print_host_state(hba);
- break;
- } /* end of switch */
-
- if ((host_byte(result) != DID_OK) &&
- (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
- ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
- return result;
-}
-
-static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
- u32 intr_mask)
-{
- if (!ufshcd_is_auto_hibern8_supported(hba) ||
- !ufshcd_is_auto_hibern8_enabled(hba))
- return false;
-
- if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
- return false;
-
- if (hba->active_uic_cmd &&
- (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
- hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
- return false;
-
- return true;
-}
-
-/**
- * ufshcd_uic_cmd_compl - handle completion of uic command
- * @hba: per adapter instance
- * @intr_status: interrupt status generated by the controller
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
-{
- irqreturn_t retval = IRQ_NONE;
-
- spin_lock(hba->host->host_lock);
- if (ufshcd_is_auto_hibern8_error(hba, intr_status))
- hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
-
- if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
- hba->active_uic_cmd->argument2 |=
- ufshcd_get_uic_cmd_result(hba);
- hba->active_uic_cmd->argument3 =
- ufshcd_get_dme_attr_val(hba);
- if (!hba->uic_async_done)
- hba->active_uic_cmd->cmd_active = 0;
- complete(&hba->active_uic_cmd->done);
- retval = IRQ_HANDLED;
- }
-
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
- hba->active_uic_cmd->cmd_active = 0;
- complete(hba->uic_async_done);
- retval = IRQ_HANDLED;
- }
-
- if (retval == IRQ_HANDLED)
- ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
- UFS_CMD_COMP);
- spin_unlock(hba->host->host_lock);
- return retval;
-}
-
-/**
- * __ufshcd_transfer_req_compl - handle SCSI and query command completion
- * @hba: per adapter instance
- * @completed_reqs: bitmask that indicates which requests to complete
- */
-static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
-{
- struct ufshcd_lrb *lrbp;
- struct scsi_cmnd *cmd;
- int result;
- int index;
- bool update_scaling = false;
-
- for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- lrbp = &hba->lrb[index];
- lrbp->compl_time_stamp = ktime_get();
- cmd = lrbp->cmd;
- if (cmd) {
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_update_monitor(hba, lrbp);
- ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = ufshcd_transfer_rsp_status(hba, lrbp);
- scsi_dma_unmap(cmd);
- cmd->result = result;
- /* Mark completed command as NULL in LRB */
- lrbp->cmd = NULL;
- /* Do not touch lrbp after scsi done */
- scsi_done(cmd);
- ufshcd_release(hba);
- update_scaling = true;
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete) {
- ufshcd_add_command_trace(hba, index,
- UFS_DEV_COMP);
- complete(hba->dev_cmd.complete);
- update_scaling = true;
- }
- }
- if (update_scaling)
- ufshcd_clk_scaling_update_busy(hba);
- }
-}
-
-/**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
- * @hba: per adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
-{
- unsigned long completed_reqs, flags;
- u32 tr_doorbell;
-
- /* Resetting interrupt aggregation counters first and reading the
- * DOOR_BELL afterward allows us to handle all the completed requests.
- * In order to prevent starvation of other interrupts the DB is read
- * once after reset. The downside of this solution is the possibility
- * of a false interrupt if the device completes another request after
- * resetting aggregation and before reading the DB.
- */
- if (ufshcd_is_intr_aggr_allowed(hba) &&
- !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
- ufshcd_reset_intr_aggr(hba);
-
- if (ufs_fail_completion())
- return IRQ_HANDLED;
-
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
- WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
- "completed: %#lx; outstanding: %#lx\n", completed_reqs,
- hba->outstanding_reqs);
- hba->outstanding_reqs &= ~completed_reqs;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
-
- if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
-}
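-
-/*
- * Editor's sketch (hypothetical helper, not part of the driver):
- * completed requests are found by diffing the hardware doorbell
- * against the driver's bookkeeping. A slot is "completed" when the
- * driver still considers it outstanding but the doorbell no longer
- * has its bit set:
- */
-static inline unsigned long sketch_completed_bits(unsigned long doorbell,
- unsigned long outstanding)
-{
- return ~doorbell & outstanding;
-}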
-
-int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
-{
- return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
- &ee_ctrl_mask);
-}
-
-int ufshcd_write_ee_control(struct ufs_hba *hba)
-{
- int err;
-
- mutex_lock(&hba->ee_ctrl_mutex);
- err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
- mutex_unlock(&hba->ee_ctrl_mutex);
- if (err)
- dev_err(hba->dev, "%s: failed to write ee control %d\n",
- __func__, err);
- return err;
-}
-
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr)
-{
- u16 new_mask, ee_ctrl_mask;
- int err = 0;
-
- mutex_lock(&hba->ee_ctrl_mutex);
- new_mask = (*mask & ~clr) | set;
- ee_ctrl_mask = new_mask | *other_mask;
- if (ee_ctrl_mask != hba->ee_ctrl_mask)
- err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
- /* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
- if (!err) {
- hba->ee_ctrl_mask = ee_ctrl_mask;
- *mask = new_mask;
- }
- mutex_unlock(&hba->ee_ctrl_mutex);
- return err;
-}
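-
-/*
- * Editor's note (illustrative helper, not in the original source):
- * the update above is a plain clear-then-set on a 16-bit mask; bits
- * in @clr are cleared first, so @set wins when a bit appears in both.
- */
-static inline u16 sketch_update_ee_mask(u16 cur, u16 set, u16 clr)
-{
- return (cur & ~clr) | set;
-}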
-
-/**
- * ufshcd_disable_ee - disable exception event
- * @hba: per-adapter instance
- * @mask: exception event to disable
- *
- * Disables exception event in the device so that the EVENT_ALERT
- * bit is not set.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
-{
- return ufshcd_update_ee_drv_mask(hba, 0, mask);
-}
-
-/**
- * ufshcd_enable_ee - enable exception event
- * @hba: per-adapter instance
- * @mask: exception event to enable
- *
- * Enable the corresponding exception event in the device to allow the
- * device to alert the host in critical scenarios.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
-{
- return ufshcd_update_ee_drv_mask(hba, mask, 0);
-}
-
-/**
- * ufshcd_enable_auto_bkops - Allow device managed BKOPS
- * @hba: per-adapter instance
- *
- * Allow device to manage background operations on its own. Enabling
- * this might lead to inconsistent latencies during normal data transfers
- * as the device is allowed to manage its own way of handling background
- * operations.
- *
- * Returns zero on success, non-zero on failure.
- */
-static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
-{
- int err = 0;
-
- if (hba->auto_bkops_enabled)
- goto out;
-
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
- if (err) {
- dev_err(hba->dev, "%s: failed to enable bkops %d\n",
- __func__, err);
- goto out;
- }
-
- hba->auto_bkops_enabled = true;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
-
- /* No need of URGENT_BKOPS exception from the device */
- err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
- if (err)
- dev_err(hba->dev, "%s: failed to disable exception event %d\n",
- __func__, err);
-out:
- return err;
-}
-
-/**
- * ufshcd_disable_auto_bkops - block device in doing background operations
- * @hba: per-adapter instance
- *
- * Disabling background operations improves command response latency but
- * has the drawback that the device may move into a critical state in
- * which it is not operable. Make sure to call ufshcd_enable_auto_bkops()
- * whenever the host is idle so that BKOPS are managed effectively
- * without any negative impact.
- *
- * Returns zero on success, non-zero on failure.
- */
-static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
-{
- int err = 0;
-
- if (!hba->auto_bkops_enabled)
- goto out;
-
- /*
- * If host assisted BKOPs is to be enabled, make sure
- * urgent bkops exception is allowed.
- */
- err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
- if (err) {
- dev_err(hba->dev, "%s: failed to enable exception event %d\n",
- __func__, err);
- goto out;
- }
-
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
- if (err) {
- dev_err(hba->dev, "%s: failed to disable bkops %d\n",
- __func__, err);
- ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
- goto out;
- }
-
- hba->auto_bkops_enabled = false;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
- hba->is_urgent_bkops_lvl_checked = false;
-out:
- return err;
-}
-
-/**
- * ufshcd_force_reset_auto_bkops - force reset auto bkops state
- * @hba: per adapter instance
- *
- * After a device reset the device may toggle the BKOPS_EN flag
- * to default value. The s/w tracking variables should be updated
- * as well. This function would change the auto-bkops state based on
- * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
- */
-static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
-{
- if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
- hba->auto_bkops_enabled = false;
- hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
- ufshcd_enable_auto_bkops(hba);
- } else {
- hba->auto_bkops_enabled = true;
- hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
- ufshcd_disable_auto_bkops(hba);
- }
- hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
- hba->is_urgent_bkops_lvl_checked = false;
-}
-
-static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
-{
- return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
-}
-
-/**
- * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
- * @hba: per-adapter instance
- * @status: bkops_status value
- *
- * Read the bkops_status from the UFS device and enable the
- * fBackgroundOpsEn flag in the device to permit background operations
- * if the device bkops_status is greater than or equal to the "status"
- * argument passed to this function; disable it otherwise.
- *
- * Returns 0 for success, non-zero in case of failure.
- *
- * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
- * to know whether auto bkops is enabled or disabled after this function
- * returns control to it.
- */
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
- enum bkops_status status)
-{
- int err;
- u32 curr_status = 0;
-
- err = ufshcd_get_bkops_status(hba, &curr_status);
- if (err) {
- dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
- __func__, err);
- goto out;
- } else if (curr_status > BKOPS_STATUS_MAX) {
- dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
- __func__, curr_status);
- err = -EINVAL;
- goto out;
- }
-
- if (curr_status >= status)
- err = ufshcd_enable_auto_bkops(hba);
- else
- err = ufshcd_disable_auto_bkops(hba);
-out:
- return err;
-}
-
-/**
- * ufshcd_urgent_bkops - handle urgent bkops exception event
- * @hba: per-adapter instance
- *
- * Enable fBackgroundOpsEn flag in the device to permit background
- * operations.
- *
- * Returns 0 if BKOPS is enabled, 1 if it is not enabled, and a negative
- * error value on any other failure.
- */
-static int ufshcd_urgent_bkops(struct ufs_hba *hba)
-{
- return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
-}
-
-static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
-{
- return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
-}
-
-static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
-{
- int err;
- u32 curr_status = 0;
-
- if (hba->is_urgent_bkops_lvl_checked)
- goto enable_auto_bkops;
-
- err = ufshcd_get_bkops_status(hba, &curr_status);
- if (err) {
- dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
- __func__, err);
- goto out;
- }
-
- /*
- * Some devices raise the urgent BKOPS exception event even when the
- * BKOPS status does not indicate a performance impact or a critical
- * state. Handle such devices by determining their urgent BKOPS
- * status at runtime.
- */
- if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
- dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
- __func__, curr_status);
- /* update the current status as the urgent bkops level */
- hba->urgent_bkops_lvl = curr_status;
- hba->is_urgent_bkops_lvl_checked = true;
- }
-
-enable_auto_bkops:
- err = ufshcd_enable_auto_bkops(hba);
-out:
- if (err < 0)
- dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
- __func__, err);
-}
-
-static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
-{
- u32 value;
-
- if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
- return;
-
- dev_info(hba->dev, "exception Tcase %d\n", value - 80);
-
- ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
-
- /*
- * A placeholder for platform vendors to add whatever additional
- * steps are required.
- */
-}
-
-static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
-{
- u8 index;
- enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
- UPIU_QUERY_OPCODE_CLEAR_FLAG;
-
- index = ufshcd_wb_get_query_index(hba);
- return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
-}
-
-int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
-{
- int ret;
-
- if (!ufshcd_is_wb_allowed(hba))
- return 0;
-
- if (!(enable ^ hba->dev_info.wb_enabled))
- return 0;
-
- ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
- if (ret) {
- dev_err(hba->dev, "%s Write Booster %s failed %d\n",
- __func__, enable ? "enable" : "disable", ret);
- return ret;
- }
-
- hba->dev_info.wb_enabled = enable;
- dev_info(hba->dev, "%s Write Booster %s\n",
- __func__, enable ? "enabled" : "disabled");
-
- return ret;
-}
-
-static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
-{
- int ret;
-
- ret = __ufshcd_wb_toggle(hba, set,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
- if (ret) {
- dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed: %d\n",
- __func__, set ? "enable" : "disable", ret);
- return;
- }
- dev_dbg(hba->dev, "%s WB-Buf Flush during H8 %s\n",
- __func__, set ? "enabled" : "disabled");
-}
-
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
-{
- int ret;
-
- if (!ufshcd_is_wb_allowed(hba) ||
- hba->dev_info.wb_buf_flush_enabled == enable)
- return;
-
- ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
- if (ret) {
- dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
- enable ? "enable" : "disable", ret);
- return;
- }
-
- hba->dev_info.wb_buf_flush_enabled = enable;
-
- dev_dbg(hba->dev, "%s WB-Buf Flush %s\n",
- __func__, enable ? "enabled" : "disabled");
-}
-
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
- u32 avail_buf)
-{
- u32 cur_buf;
- int ret;
- u8 index;
-
- index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
- index, 0, &cur_buf);
- if (ret) {
- dev_err(hba->dev, "%s dCurWriteBoosterBufferSize read failed %d\n",
- __func__, ret);
- return false;
- }
-
- if (!cur_buf) {
- dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
- cur_buf);
- return false;
- }
- /* Let it continue to flush when available buffer exceeds threshold */
- if (avail_buf < hba->vps->wb_flush_threshold)
- return true;
-
- return false;
-}
-
-static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
-{
- int ret;
- u32 avail_buf;
- u8 index;
-
- if (!ufshcd_is_wb_allowed(hba))
- return false;
- /*
- * The UFS device needs VCC to be ON to flush.
- * With user-space reduction enabled, it's enough to decide on a
- * flush by checking only the available buffer; the threshold used
- * here is "more than 90% full".
- * With user-space preservation enabled, the current buffer should
- * be checked too, because the WB buffer size can shrink as the disk
- * fills up. This information is provided by the current buffer
- * attribute (dCurrentWriteBoosterBufferSize). There is no point in
- * keeping VCC on when the current buffer is empty.
- */
- index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
- index, 0, &avail_buf);
- if (ret) {
- dev_warn(hba->dev, "%s dAvailableWriteBoosterBufferSize read failed %d\n",
- __func__, ret);
- return false;
- }
-
- if (!hba->dev_info.b_presrv_uspc_en) {
- if (avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10))
- return true;
- return false;
- }
-
- return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
-}
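-
-/*
- * Editor's sketch (hypothetical helper, not from the original source):
- * the flush decision above reduced to a pure function over the two
- * WriteBooster buffer attributes.
- */
-static bool sketch_wb_need_flush(bool presrv_uspc_en, u32 avail_buf,
- u32 cur_buf, u32 flush_threshold)
-{
- if (!presrv_uspc_en)
- return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
- /* preserved user space: only flush while there is data to flush */
- return cur_buf && avail_buf < flush_threshold;
-}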
-
-static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
-{
- struct ufs_hba *hba = container_of(to_delayed_work(work),
- struct ufs_hba,
- rpm_dev_flush_recheck_work);
- /*
- * To prevent unnecessary VCC power drain after the device finishes a
- * WriteBooster buffer flush or Auto-BKOPS, force a runtime resume
- * after a certain delay so that the threshold is rechecked by the
- * next runtime suspend.
- */
- ufshcd_rpm_get_sync(hba);
- ufshcd_rpm_put_sync(hba);
-}
-
-/**
- * ufshcd_exception_event_handler - handle exceptions raised by device
- * @work: pointer to work data
- *
- * Read bExceptionEventStatus attribute from the device and handle the
- * exception event accordingly.
- */
-static void ufshcd_exception_event_handler(struct work_struct *work)
-{
- struct ufs_hba *hba;
- int err;
- u32 status = 0;
- hba = container_of(work, struct ufs_hba, eeh_work);
-
- ufshcd_scsi_block_requests(hba);
- err = ufshcd_get_ee_status(hba, &status);
- if (err) {
- dev_err(hba->dev, "%s: failed to get exception status %d\n",
- __func__, err);
- goto out;
- }
-
- trace_ufshcd_exception_event(dev_name(hba->dev), status);
-
- if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
- ufshcd_bkops_exception_event_handler(hba);
-
- if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
- ufshcd_temp_exception_event_handler(hba, status);
-
- ufs_debugfs_exception_event(hba, status);
-out:
- ufshcd_scsi_unblock_requests(hba);
-}
-
-/* Complete requests that have door-bell cleared */
-static void ufshcd_complete_requests(struct ufs_hba *hba)
-{
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
-}
-
-/**
- * ufshcd_quirk_dl_nac_errors - check whether error handling is required
- * to recover from DL NAC errors
- * @hba: per-adapter instance
- *
- * Returns true if error handling is required, false otherwise
- */
-static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
-{
- unsigned long flags;
- bool err_handling = true;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- /*
- * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
- * device fatal errors and/or DL NAC & REPLAY timeout errors.
- */
- if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
- goto out;
-
- if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
- goto out;
-
- if ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
- int err;
- /*
- * Wait 50 ms to see whether any other errors show up.
- */
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- msleep(50);
- spin_lock_irqsave(hba->host->host_lock, flags);
-
- /*
- * Now check whether any severe errors other than the DL NAC
- * error have been received.
- */
- if ((hba->saved_err & INT_FATAL_ERRORS) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
- goto out;
-
- /*
- * As DL NAC is the only error received so far, send out NOP
- * command to confirm if link is still active or not.
- * - If we don't get any response then do error recovery.
- * - If we get response then clear the DL NAC error bit.
- */
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- err = ufshcd_verify_dev_init(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
-
- if (err)
- goto out;
-
- /* Link seems to be alive hence ignore the DL NAC errors */
- if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
- hba->saved_err &= ~UIC_ERROR;
- /* clear NAC error */
- hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- if (!hba->saved_uic_err)
- err_handling = false;
- }
-out:
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return err_handling;
-}
-
-/* host lock must be held before calling this func */
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
- return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
- (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-void ufshcd_schedule_eh_work(struct ufs_hba *hba)
-{
- lockdep_assert_held(hba->host->host_lock);
-
- /* handle fatal errors only when link is not in error state */
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba))
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
- else
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
- queue_work(hba->eh_wq, &hba->eh_work);
- }
-}
-
-static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
-{
- down_write(&hba->clk_scaling_lock);
- hba->clk_scaling.is_allowed = allow;
- up_write(&hba->clk_scaling_lock);
-}
-
-static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
-{
- if (suspend) {
- if (hba->clk_scaling.is_enabled)
- ufshcd_suspend_clkscaling(hba);
- ufshcd_clk_scaling_allow(hba, false);
- } else {
- ufshcd_clk_scaling_allow(hba, true);
- if (hba->clk_scaling.is_enabled)
- ufshcd_resume_clkscaling(hba);
- }
-}
-
-static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
-{
- ufshcd_rpm_get_sync(hba);
- if (pm_runtime_status_suspended(&hba->sdev_ufs_device->sdev_gendev) ||
- hba->is_sys_suspended) {
- enum ufs_pm_op pm_op;
-
- /*
- * Don't assume anything about the resume path: if resume fails,
- * IRQ and clocks can be off, and power supplies can be off or in
- * low-power mode.
- */
- ufshcd_setup_hba_vreg(hba, true);
- ufshcd_enable_irq(hba);
- ufshcd_setup_vreg(hba, true);
- ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
- ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
- ufshcd_hold(hba, false);
- if (!ufshcd_is_clkgating_allowed(hba))
- ufshcd_setup_clocks(hba, true);
- ufshcd_release(hba);
- pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
- ufshcd_vops_resume(hba, pm_op);
- } else {
- ufshcd_hold(hba, false);
- if (ufshcd_is_clkscaling_supported(hba) &&
- hba->clk_scaling.is_enabled)
- ufshcd_suspend_clkscaling(hba);
- ufshcd_clk_scaling_allow(hba, false);
- }
- ufshcd_scsi_block_requests(hba);
- /* Drain ufshcd_queuecommand() */
- down_write(&hba->clk_scaling_lock);
- up_write(&hba->clk_scaling_lock);
- cancel_work_sync(&hba->eeh_work);
-}
-
-static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
-{
- ufshcd_scsi_unblock_requests(hba);
- ufshcd_release(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- ufshcd_clk_scaling_suspend(hba, false);
- ufshcd_rpm_put(hba);
-}
-
-static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
-{
- return (!hba->is_powered || hba->shutting_down ||
- !hba->sdev_ufs_device ||
- hba->ufshcd_state == UFSHCD_STATE_ERROR ||
- (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
- ufshcd_is_link_broken(hba))));
-}
-
-#ifdef CONFIG_PM
-static void ufshcd_recover_pm_error(struct ufs_hba *hba)
-{
- struct Scsi_Host *shost = hba->host;
- struct scsi_device *sdev;
- struct request_queue *q;
- int ret;
-
- hba->is_sys_suspended = false;
- /*
- * Set the RPM status of the WLUN device to RPM_ACTIVE; this also
- * clears its runtime error.
- */
- ret = pm_runtime_set_active(&hba->sdev_ufs_device->sdev_gendev);
-
- /* hba device might have a runtime error otherwise */
- if (ret)
- ret = pm_runtime_set_active(hba->dev);
- /*
- * If wlun device had runtime error, we also need to resume those
- * consumer scsi devices in case any of them has failed to be
- * resumed due to supplier runtime resume failure. This is to unblock
- * blk_queue_enter in case there are bios waiting inside it.
- */
- if (!ret) {
- shost_for_each_device(sdev, shost) {
- q = sdev->request_queue;
- if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
- q->rpm_status == RPM_SUSPENDING))
- pm_request_resume(q->dev);
- }
- }
-}
-#else
-static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
-{
-}
-#endif
-
-static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
-{
- struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
- u32 mode;
-
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
-
- if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
- return true;
-
- if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
- return true;
-
- return false;
-}
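-
-/*
- * Editor's note (illustrative, not part of the driver): PA_PWRMODE
- * packs the RX power mode in its upper bits and the TX power mode in
- * its lower bits, which is why the comparisons above use:
- *
- * rx_mode = (mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK;
- * tx_mode = mode & PWRMODE_MASK;
- */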
-
-/**
- * ufshcd_err_handler - handle UFS errors that require s/w attention
- * @work: pointer to work structure
- */
-static void ufshcd_err_handler(struct work_struct *work)
-{
- int retries = MAX_ERR_HANDLER_RETRIES;
- struct ufs_hba *hba;
- unsigned long flags;
- bool needs_restore;
- bool needs_reset;
- bool err_xfer;
- bool err_tm;
- int pmc_err;
- int tag;
-
- hba = container_of(work, struct ufs_hba, eh_work);
-
- dev_info(hba->dev,
- "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
- __func__, ufshcd_state_name[hba->ufshcd_state],
- hba->is_powered, hba->shutting_down, hba->saved_err,
- hba->saved_uic_err, hba->force_reset,
- ufshcd_is_link_broken(hba) ? "; link is broken" : "");
-
- down(&hba->host_sem);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ufshcd_err_handling_should_stop(hba)) {
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- up(&hba->host_sem);
- return;
- }
- ufshcd_set_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_err_handling_prepare(hba);
- /* Complete requests that have door-bell cleared by h/w */
- ufshcd_complete_requests(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
-again:
- needs_restore = false;
- needs_reset = false;
- err_xfer = false;
- err_tm = false;
-
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- /*
- * A full reset and restore might have happened after preparation
- * is finished, double check whether we should stop.
- */
- if (ufshcd_err_handling_should_stop(hba))
- goto skip_err_handling;
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- bool ret;
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
- ret = ufshcd_quirk_dl_nac_errors(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!ret && ufshcd_err_handling_should_stop(hba))
- goto skip_err_handling;
- }
-
- if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
- (hba->saved_uic_err &&
- (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
- bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
-
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_evt_hist(hba);
- ufshcd_print_tmrs(hba, hba->outstanding_tasks);
- ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
- spin_lock_irqsave(hba->host->host_lock, flags);
- }
-
- /*
- * If a host reset is required, skip forcefully clearing the
- * pending transfers: they will be cleared during the host reset
- * and restore anyway.
- */
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
- needs_reset = true;
- goto do_reset;
- }
-
- /*
- * If a LINERESET was caught, UFS might have been put into PWM mode;
- * check whether a power mode restore is needed.
- */
- if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
- hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
- if (!hba->saved_uic_err)
- hba->saved_err &= ~UIC_ERROR;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (ufshcd_is_pwr_mode_restore_needed(hba))
- needs_restore = true;
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!hba->saved_err && !needs_restore)
- goto skip_err_handling;
- }
-
- hba->silence_err_logs = true;
- /* release lock as clear command might sleep */
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- /* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- if (ufshcd_try_to_abort_task(hba, tag)) {
- err_xfer = true;
- goto lock_skip_pending_xfer_clear;
- }
- dev_err(hba->dev, "Aborted tag %d / CDB %#02x\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1);
- }
-
- /* Clear pending task management requests */
- for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
- if (ufshcd_clear_tm_cmd(hba, tag)) {
- err_tm = true;
- goto lock_skip_pending_xfer_clear;
- }
- }
-
-lock_skip_pending_xfer_clear:
- /* Complete the requests that are cleared by s/w */
- ufshcd_complete_requests(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->silence_err_logs = false;
- if (err_xfer || err_tm) {
- needs_reset = true;
- goto do_reset;
- }
-
- /*
- * Now that all requests and tasks are cleared from the doorbell,
- * it is safe to restore the power mode.
- */
- if (needs_restore) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- /*
- * Hold the scaling lock just in case dev cmds
- * are sent via bsg and/or sysfs.
- */
- down_write(&hba->clk_scaling_lock);
- hba->force_pmc = true;
- pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
- if (pmc_err) {
- needs_reset = true;
- dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
- __func__, pmc_err);
- }
- hba->force_pmc = false;
- ufshcd_print_pwr_info(hba);
- up_write(&hba->clk_scaling_lock);
- spin_lock_irqsave(hba->host->host_lock, flags);
- }
-
-do_reset:
- /* Fatal errors need reset */
- if (needs_reset) {
- int err;
-
- hba->force_reset = false;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- err = ufshcd_reset_and_restore(hba);
- if (err)
- dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
- __func__, err);
- else
- ufshcd_recover_pm_error(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
- }
-
-skip_err_handling:
- if (!needs_reset) {
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- if (hba->saved_err || hba->saved_uic_err)
- dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
- __func__, hba->saved_err, hba->saved_uic_err);
- }
- /* Exit in an operational state or dead */
- if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
- hba->ufshcd_state != UFSHCD_STATE_ERROR) {
- if (--retries)
- goto again;
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- }
- ufshcd_clear_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_err_handling_unprepare(hba);
- up(&hba->host_sem);
-
- dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
- ufshcd_state_name[hba->ufshcd_state]);
-}
-
-/**
- * ufshcd_update_uic_error - check and set fatal UIC error flags.
- * @hba: per-adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
-{
- u32 reg;
- irqreturn_t retval = IRQ_NONE;
-
- /* PHY layer error */
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
- if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
- /*
- * To know whether this error is fatal or not, the DB timeout
- * would have to be checked; but that error is handled separately.
- */
- if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
- dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
- __func__);
-
- /* Got a LINERESET indication. */
- if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
- struct uic_command *cmd = NULL;
-
- hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
- if (hba->uic_async_done && hba->active_uic_cmd)
- cmd = hba->active_uic_cmd;
- /*
- * Ignore the LINERESET during power mode change
- * operation via DME_SET command.
- */
- if (cmd && (cmd->command == UIC_CMD_DME_SET))
- hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
- }
- retval |= IRQ_HANDLED;
- }
-
- /* PA_INIT_ERROR is fatal and needs UIC reset */
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
- (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
-
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
- else if (hba->dev_quirks &
- UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
- if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
- hba->uic_error |=
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
- hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
- }
- retval |= IRQ_HANDLED;
- }
-
- /* UIC NL/TL/DME errors need a software retry */
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if ((reg & UIC_NETWORK_LAYER_ERROR) &&
- (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
- hba->uic_error |= UFSHCD_UIC_NL_ERROR;
- retval |= IRQ_HANDLED;
- }
-
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
- (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
- ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
- hba->uic_error |= UFSHCD_UIC_TL_ERROR;
- retval |= IRQ_HANDLED;
- }
-
- reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if ((reg & UIC_DME_ERROR) &&
- (reg & UIC_DME_ERROR_CODE_MASK)) {
- ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
- hba->uic_error |= UFSHCD_UIC_DME_ERROR;
- retval |= IRQ_HANDLED;
- }
-
- dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
- __func__, hba->uic_error);
- return retval;
-}
-
-/**
- * ufshcd_check_errors - Check for errors that need s/w attention
- * @hba: per-adapter instance
- * @intr_status: interrupt status generated by the controller
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
-{
- bool queue_eh_work = false;
- irqreturn_t retval = IRQ_NONE;
-
- spin_lock(hba->host->host_lock);
- hba->errors |= UFSHCD_ERROR_MASK & intr_status;
-
- if (hba->errors & INT_FATAL_ERRORS) {
- ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
- hba->errors);
- queue_eh_work = true;
- }
-
- if (hba->errors & UIC_ERROR) {
- hba->uic_error = 0;
- retval = ufshcd_update_uic_error(hba);
- if (hba->uic_error)
- queue_eh_work = true;
- }
-
- if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
- dev_err(hba->dev,
- "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
- __func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
- "Enter" : "Exit",
- hba->errors, ufshcd_get_upmcrs(hba));
- ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
- hba->errors);
- ufshcd_set_link_broken(hba);
- queue_eh_work = true;
- }
-
- if (queue_eh_work) {
- /*
- * Update the transfer error masks to sticky bits; do this
- * irrespective of the current ufshcd_state.
- */
- hba->saved_err |= hba->errors;
- hba->saved_uic_err |= hba->uic_error;
-
- /* dump controller state before resetting */
- if ((hba->saved_err &
- (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
- (hba->saved_uic_err &&
- (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
- dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
- __func__, hba->saved_err,
- hba->saved_uic_err);
- ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
- "host_regs: ");
- ufshcd_print_pwr_info(hba);
- }
- ufshcd_schedule_eh_work(hba);
- retval |= IRQ_HANDLED;
- }
- /*
- * if (!queue_eh_work) -
- * Other errors are either non-fatal, where the host recovers by
- * itself without s/w intervention, or errors that will be handled
- * by the SCSI core layer.
- */
- hba->errors = 0;
- hba->uic_error = 0;
- spin_unlock(hba->host->host_lock);
- return retval;
-}
-
-/**
- * ufshcd_tmc_handler - handle task management function completion
- * @hba: per adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
-{
- unsigned long flags, pending, issued;
- irqreturn_t ret = IRQ_NONE;
- int tag;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- issued = hba->outstanding_tasks & ~pending;
- for_each_set_bit(tag, &issued, hba->nutmrs) {
- struct request *req = hba->tmf_rqs[tag];
- struct completion *c = req->end_io_data;
-
- complete(c);
- ret = IRQ_HANDLED;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- return ret;
-}
-
-/**
- * ufshcd_sl_intr - Interrupt service routine
- * @hba: per adapter instance
- * @intr_status: contains interrupts generated by the controller
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
-{
- irqreturn_t retval = IRQ_NONE;
-
- if (intr_status & UFSHCD_UIC_MASK)
- retval |= ufshcd_uic_cmd_compl(hba, intr_status);
-
- if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
- retval |= ufshcd_check_errors(hba, intr_status);
-
- if (intr_status & UTP_TASK_REQ_COMPL)
- retval |= ufshcd_tmc_handler(hba);
-
- if (intr_status & UTP_TRANSFER_REQ_COMPL)
- retval |= ufshcd_transfer_req_compl(hba);
-
- return retval;
-}
-
-/**
- * ufshcd_intr - Main interrupt service routine
- * @irq: irq number
- * @__hba: pointer to adapter instance
- *
- * Returns
- * IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
- */
-static irqreturn_t ufshcd_intr(int irq, void *__hba)
-{
- u32 intr_status, enabled_intr_status = 0;
- irqreturn_t retval = IRQ_NONE;
- struct ufs_hba *hba = __hba;
- int retries = hba->nutrs;
-
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- hba->ufs_stats.last_intr_status = intr_status;
- hba->ufs_stats.last_intr_ts = ktime_get();
-
- /*
- * There can be at most hba->nutrs reqs in flight and, in the worst
- * case, they may get finished one by one after the interrupt status is
- * read. Make sure we handle them by checking the interrupt status
- * again in a loop until we process all of the reqs before returning.
- */
- while (intr_status && retries--) {
- enabled_intr_status =
- intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- if (enabled_intr_status)
- retval |= ufshcd_sl_intr(hba, enabled_intr_status);
-
- intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
- }
-
- if (enabled_intr_status && retval == IRQ_NONE &&
- (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
- hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
- dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
- __func__,
- intr_status,
- hba->ufs_stats.last_intr_status,
- enabled_intr_status);
- ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
- }
-
- return retval;
-}
-
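-/*
- * Illustrative sketch, not part of the original driver: the drain pattern of
- * ufshcd_intr() in miniature. Status is re-read after each acknowledged pass
- * so that events raised mid-handling are not lost, and the retry budget
- * bounds the loop. read_status(), ack() and service() are hypothetical
- * stand-ins for the register accessors and ufshcd_sl_intr().
- */
-static u32 read_status(void);
-static void ack(u32 status);
-static irqreturn_t service(u32 status);
-
-static irqreturn_t drain_events_sketch(int budget)
-{
- irqreturn_t ret = IRQ_NONE;
- u32 status;
-
- while ((status = read_status()) && budget--) {
- ack(status); /* clear exactly the bits just observed */
- ret |= service(status); /* new events show up on the next read */
- }
- return ret;
-}
-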
-static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
-{
- int err = 0;
- u32 mask = 1 << tag;
- unsigned long flags;
-
- if (!test_bit(tag, &hba->outstanding_tasks))
- goto out;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_utmrl_clear(hba, tag);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* poll for max. 1 sec to clear door bell register by h/w */
- err = ufshcd_wait_for_register(hba,
- REG_UTP_TASK_REQ_DOOR_BELL,
- mask, 0, 1000, 1000);
-
- dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
- tag, err ? "succeeded" : "failed");
-
-out:
- return err;
-}
-
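-/*
- * Illustrative sketch, not part of the original driver: the shape of the
- * bounded poll that ufshcd_wait_for_register() performs above, written out
- * under the assumption of a sleepable context. The helper name is
- * hypothetical.
- */
-static int poll_reg_sketch(struct ufs_hba *hba, u32 reg, u32 mask, u32 val,
- unsigned long interval_us, unsigned long timeout_ms)
-{
- unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
-
- while ((ufshcd_readl(hba, reg) & mask) != (val & mask)) {
- if (time_after(jiffies, deadline))
- return -ETIMEDOUT;
- usleep_range(interval_us, interval_us + 50);
- }
- return 0;
-}
-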
-static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
- struct utp_task_req_desc *treq, u8 tm_function)
-{
- struct request_queue *q = hba->tmf_queue;
- struct Scsi_Host *host = hba->host;
- DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
- unsigned long flags;
- int task_tag, err;
-
- /*
- * blk_mq_alloc_request() is used here only to get a free tag.
- */
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->end_io_data = &wait;
- ufshcd_hold(hba, false);
-
- spin_lock_irqsave(host->host_lock, flags);
-
- task_tag = req->tag;
- hba->tmf_rqs[req->tag] = req;
- treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
-
- memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
- ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
-
- /* send command to the controller */
- __set_bit(task_tag, &hba->outstanding_tasks);
-
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
- /* Make sure that doorbell is committed immediately */
- wmb();
-
- spin_unlock_irqrestore(host->host_lock, flags);
-
- ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
-
- /* wait until the task management command is completed */
- err = wait_for_completion_io_timeout(&wait,
- msecs_to_jiffies(TM_CMD_TIMEOUT));
- if (!err) {
- ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
- dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
- __func__, tm_function);
- if (ufshcd_clear_tm_cmd(hba, task_tag))
- dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
- __func__, task_tag);
- err = -ETIMEDOUT;
- } else {
- err = 0;
- memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
-
- ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
- }
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->tmf_rqs[req->tag] = NULL;
- __clear_bit(task_tag, &hba->outstanding_tasks);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- ufshcd_release(hba);
- blk_mq_free_request(req);
-
- return err;
-}
-
-/**
- * ufshcd_issue_tm_cmd - issues task management commands to controller
- * @hba: per adapter instance
- * @lun_id: LUN ID to which TM command is sent
- * @task_id: task ID to which the TM command is applicable
- * @tm_function: task management function opcode
- * @tm_response: task management service response return value
- *
- * Returns non-zero value on error, zero on success.
- */
-static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
- u8 tm_function, u8 *tm_response)
-{
- struct utp_task_req_desc treq = { { 0 }, };
- enum utp_ocs ocs_value;
- int err;
-
- /* Configure task request descriptor */
- treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- /* Configure task request UPIU */
- treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
- cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
- treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
-
- /*
- * The host shall provide the same value for LUN field in the basic
- * header and for Input Parameter.
- */
- treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
- treq.upiu_req.input_param2 = cpu_to_be32(task_id);
-
- err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
- if (err == -ETIMEDOUT)
- return err;
-
- ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
- if (ocs_value != OCS_SUCCESS)
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
- __func__, ocs_value);
- else if (tm_response)
- *tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
- MASK_TM_SERVICE_RESP;
- return err;
-}
-
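-/*
- * Illustrative sketch, not part of the original driver: the big-endian UPIU
- * header packing performed by ufshcd_issue_tm_cmd() above, isolated. dword_0
- * carries the transaction type in byte 0 and the LUN in byte 2; dword_1
- * carries the TM function in byte 1. The helper name is hypothetical.
- */
-static inline void pack_tm_header_sketch(__be32 *dword_0, __be32 *dword_1,
- u8 lun, u8 tm_function)
-{
- *dword_0 = cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24) |
- cpu_to_be32(lun << 8);
- *dword_1 = cpu_to_be32(tm_function << 16);
-}
-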
-/**
- * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
- * @hba: per-adapter instance
- * @req_upiu: upiu request
- * @rsp_upiu: upiu reply
- * @desc_buff: pointer to descriptor buffer, NULL if NA
- * @buff_len: descriptor size, 0 if NA
- * @cmd_type: specifies the type (NOP, Query...)
- * @desc_op: descriptor operation
- *
- * These types of requests use the UTP Transfer Request Descriptor (utrd).
- * Therefore, they "ride" the device management infrastructure: they use
- * its tag and task work queues.
- *
- * Since there is only one available tag for device management commands,
- * the caller is expected to hold the hba->dev_cmd.lock mutex.
- */
-static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- u8 *desc_buff, int *buff_len,
- enum dev_cmd_type cmd_type,
- enum query_opcode desc_op)
-{
- struct request_queue *q = hba->cmd_queue;
- DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
- struct ufshcd_lrb *lrbp;
- int err = 0;
- int tag;
- u8 upiu_flags;
-
- down_read(&hba->clk_scaling_lock);
-
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
-
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- err = -EBUSY;
- goto out;
- }
-
- lrbp = &hba->lrb[tag];
- WARN_ON(lrbp->cmd);
- lrbp->cmd = NULL;
- lrbp->sense_bufflen = 0;
- lrbp->sense_buffer = NULL;
- lrbp->task_tag = tag;
- lrbp->lun = 0;
- lrbp->intr_cmd = true;
- ufshcd_prepare_lrbp_crypto(NULL, lrbp);
- hba->dev_cmd.type = cmd_type;
-
- if (hba->ufs_version <= ufshci_version(1, 1))
- lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
- else
- lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-
- /* update the task tag in the request upiu */
- req_upiu->header.dword_0 |= cpu_to_be32(tag);
-
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
-
- /* just copy the upiu request as it is */
- memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
- if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
- /* The Data Segment Area is optional depending upon the query
- * function value. For WRITE DESCRIPTOR, the data segment
- * follows right after the tsf.
- */
- memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
- *buff_len = 0;
- }
-
- memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
-
- hba->dev_cmd.complete = &wait;
-
- ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
-
- ufshcd_send_command(hba, tag);
- /*
- * ignore the return value here - ufshcd_check_query_response() is
- * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
- * Read the response directly, ignoring all errors.
- */
- ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
-
- /* just copy the upiu response as it is */
- memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
- if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
- u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
- u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
- MASK_QUERY_DATA_SEG_LEN;
-
- if (*buff_len >= resp_len) {
- memcpy(desc_buff, descp, resp_len);
- *buff_len = resp_len;
- } else {
- dev_warn(hba->dev,
- "%s: rsp size %d is bigger than buffer size %d",
- __func__, resp_len, *buff_len);
- *buff_len = 0;
- err = -EINVAL;
- }
- }
- ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
- (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
-
-out:
- blk_mq_free_request(req);
-out_unlock:
- up_read(&hba->clk_scaling_lock);
- return err;
-}
-
-/**
- * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
- * @hba: per-adapter instance
- * @req_upiu: upiu request
- * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
- * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
- * @desc_buff: pointer to descriptor buffer, NULL if NA
- * @buff_len: descriptor size, 0 if NA
- * @desc_op: descriptor operation
- *
- * Supports UTP Transfer requests (nop and query), and UTP Task
- * Management requests.
- * It is up to the caller to fill the upiu content properly, as it will
- * be copied without any further input validation.
- */
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op)
-{
- int err;
- enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
- struct utp_task_req_desc treq = { { 0 }, };
- enum utp_ocs ocs_value;
- u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
-
- switch (msgcode) {
- case UPIU_TRANSACTION_NOP_OUT:
- cmd_type = DEV_CMD_TYPE_NOP;
- fallthrough;
- case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba, false);
- mutex_lock(&hba->dev_cmd.lock);
- err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
- desc_buff, buff_len,
- cmd_type, desc_op);
- mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
-
- break;
- case UPIU_TRANSACTION_TASK_REQ:
- treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
-
- err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
- if (err == -ETIMEDOUT)
- break;
-
- ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
- if (ocs_value != OCS_SUCCESS) {
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
- ocs_value);
- break;
- }
-
- memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
-
- break;
- default:
- err = -EINVAL;
-
- break;
- }
-
- return err;
-}
-
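-/*
- * Hedged usage sketch, not part of the original driver: issuing a NOP OUT
- * through the raw UPIU interface above. No descriptor payload is involved,
- * so desc_buff is NULL and the length is zero. The function name is
- * hypothetical.
- */
-static int send_nop_out_sketch(struct ufs_hba *hba)
-{
- struct utp_upiu_req req_upiu = { }, rsp_upiu = { };
- int buff_len = 0;
-
- req_upiu.header.dword_0 = cpu_to_be32(UPIU_TRANSACTION_NOP_OUT << 24);
- return ufshcd_exec_raw_upiu_cmd(hba, &req_upiu, &rsp_upiu,
- UPIU_TRANSACTION_NOP_OUT, NULL,
- &buff_len, UPIU_QUERY_OPCODE_NOP);
-}
-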
-/**
- * ufshcd_eh_device_reset_handler - device reset handler registered to
- * scsi layer.
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *host;
- struct ufs_hba *hba;
- u32 pos;
- int err;
- u8 resp = 0xF, lun;
-
- host = cmd->device->host;
- hba = shost_priv(host);
-
- lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
- err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err)
- err = resp;
- goto out;
- }
-
- /* clear the commands that were pending for corresponding LUN */
- for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lun) {
- err = ufshcd_clear_cmd(hba, pos);
- if (err)
- break;
- __ufshcd_transfer_req_compl(hba, 1U << pos);
- }
- }
-
-out:
- hba->req_abort_count = 0;
- ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
- if (!err) {
- err = SUCCESS;
- } else {
- dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
- err = FAILED;
- }
- return err;
-}
-
-static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
-{
- struct ufshcd_lrb *lrbp;
- int tag;
-
- for_each_set_bit(tag, &bitmap, hba->nutrs) {
- lrbp = &hba->lrb[tag];
- lrbp->req_abort_skip = true;
- }
-}
-
-/**
- * ufshcd_try_to_abort_task - abort a specific task
- * @hba: Pointer to adapter instance
- * @tag: Task tag/index to be aborted
- *
- * Abort the pending command in device by sending UFS_ABORT_TASK task management
- * command, and in host controller by clearing the door-bell register. There can
- * be race between controller sending the command to the device while abort is
- * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
- * really issued and then try to abort it.
- *
- * Returns zero on success, non-zero on failure
- */
-static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
-{
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- int err = 0;
- int poll_cnt;
- u8 resp = 0xF;
- u32 reg;
-
- for (poll_cnt = 100; poll_cnt; poll_cnt--) {
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_QUERY_TASK, &resp);
- if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
- /* cmd pending in the device */
- dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
- __func__, tag);
- break;
- } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- /*
- * cmd not pending in the device, check if it is
- * in transition.
- */
- dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
- __func__, tag);
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (reg & (1 << tag)) {
- /* sleep for max. 200us to stabilize */
- usleep_range(100, 200);
- continue;
- }
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
- __func__, tag);
- goto out;
- } else {
- dev_err(hba->dev,
- "%s: no response from device. tag = %d, err %d\n",
- __func__, tag, err);
- if (!err)
- err = resp; /* service response error */
- goto out;
- }
- }
-
- if (!poll_cnt) {
- err = -EBUSY;
- goto out;
- }
-
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_ABORT_TASK, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err) {
- err = resp; /* service response error */
- dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
- __func__, tag, err);
- }
- goto out;
- }
-
- err = ufshcd_clear_cmd(hba, tag);
- if (err)
- dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
- __func__, tag, err);
-
-out:
- return err;
-}
-
-/**
- * ufshcd_abort - scsi host template eh_abort_handler callback
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_abort(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *host = cmd->device->host;
- struct ufs_hba *hba = shost_priv(host);
- int tag = scsi_cmd_to_rq(cmd)->tag;
- struct ufshcd_lrb *lrbp = &hba->lrb[tag];
- unsigned long flags;
- int err = FAILED;
- u32 reg;
-
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
-
- ufshcd_hold(hba, false);
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- /* If command is already aborted/completed, return FAILED. */
- if (!(test_bit(tag, &hba->outstanding_reqs))) {
- dev_err(hba->dev,
- "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
- __func__, tag, hba->outstanding_reqs, reg);
- goto release;
- }
-
- /* Print Transfer Request of aborted task */
- dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
-
- /*
- * Print detailed info about aborted request.
- * As more than one request might get aborted at the same time,
- * print full information only for the first aborted request in order
- * to reduce repeated printouts. For other aborted requests only print
- * basic details.
- */
- scsi_print_command(cmd);
- if (!hba->req_abort_count) {
- ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
- ufshcd_print_evt_hist(hba);
- ufshcd_print_host_state(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_trs(hba, 1 << tag, true);
- } else {
- ufshcd_print_trs(hba, 1 << tag, false);
- }
- hba->req_abort_count++;
-
- if (!(reg & (1 << tag))) {
- dev_err(hba->dev,
- "%s: cmd was completed, but without a notifying intr, tag = %d",
- __func__, tag);
- __ufshcd_transfer_req_compl(hba, 1UL << tag);
- goto release;
- }
-
- /*
- * Task abort to the device W-LUN is illegal. When this command
- * fails due to that spec violation, the next step of SCSI error
- * handling will be to send a LU reset which, again, is a spec violation.
- * To avoid these unnecessary/illegal steps, first we clean up
- * the lrb taken by this cmd and re-set it in outstanding_reqs,
- * then queue the eh_work and bail.
- */
- if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
- ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
-
- spin_lock_irqsave(host->host_lock, flags);
- hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
- spin_unlock_irqrestore(host->host_lock, flags);
- goto release;
- }
-
- /* Skip task abort in case previous aborts failed and report failure */
- if (lrbp->req_abort_skip) {
- dev_err(hba->dev, "%s: skipping abort\n", __func__);
- ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
- goto release;
- }
-
- err = ufshcd_try_to_abort_task(hba, tag);
- if (err) {
- dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
- ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
- err = FAILED;
- goto release;
- }
-
- lrbp->cmd = NULL;
- err = SUCCESS;
-
-release:
- /* Matches the ufshcd_hold() call at the start of this function. */
- ufshcd_release(hba);
- return err;
-}
-
-/**
- * ufshcd_host_reset_and_restore - reset and restore host controller
- * @hba: per-adapter instance
- *
- * Note that host controller reset may issue DME_RESET to the
- * local and remote (device) UniPro stacks, and the attributes
- * are reset to their default state.
- *
- * Returns zero on success, non-zero on failure
- */
-static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
-{
- int err;
-
- /*
- * Stop the host controller and complete the requests
- * cleared by h/w
- */
- ufshpb_reset_host(hba);
- ufshcd_hba_stop(hba);
- hba->silence_err_logs = true;
- ufshcd_complete_requests(hba);
- hba->silence_err_logs = false;
-
- /* scale up clocks to max frequency before full reinitialization */
- ufshcd_set_clk_freq(hba, true);
-
- err = ufshcd_hba_enable(hba);
-
- /* Establish the link again and restore the device */
- if (!err)
- err = ufshcd_probe_hba(hba, false);
-
- if (err)
- dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
- ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
- return err;
-}
-
-/**
- * ufshcd_reset_and_restore - reset and re-initialize host/device
- * @hba: per-adapter instance
- *
- * Reset and recover device, host and re-establish link. This
- * is helpful to recover the communication in fatal error conditions.
- *
- * Returns zero on success, non-zero on failure
- */
-static int ufshcd_reset_and_restore(struct ufs_hba *hba)
-{
- u32 saved_err = 0;
- u32 saved_uic_err = 0;
- int err = 0;
- unsigned long flags;
- int retries = MAX_HOST_RESET_RETRIES;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- do {
- /*
- * This is a fresh start, cache and clear saved error first,
- * in case new error generated during reset and restore.
- */
- saved_err |= hba->saved_err;
- saved_uic_err |= hba->saved_uic_err;
- hba->saved_err = 0;
- hba->saved_uic_err = 0;
- hba->force_reset = false;
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- /* Reset the attached device */
- ufshcd_device_reset(hba);
-
- err = ufshcd_host_reset_and_restore(hba);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (err)
- continue;
- /* Do not exit unless operational or dead */
- if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
- hba->ufshcd_state != UFSHCD_STATE_ERROR &&
- hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
- err = -EAGAIN;
- } while (err && --retries);
-
- /*
- * Inform the scsi mid-layer that we did a reset and allow it to handle
- * Unit Attention properly.
- */
- scsi_report_bus_reset(hba->host, 0);
- if (err) {
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- hba->saved_err |= saved_err;
- hba->saved_uic_err |= saved_uic_err;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- return err;
-}
-
-/**
- * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
-{
- int err = SUCCESS;
- unsigned long flags;
- struct ufs_hba *hba;
-
- hba = shost_priv(cmd->device->host);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->force_reset = true;
- ufshcd_schedule_eh_work(hba);
- dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- flush_work(&hba->eh_work);
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
- err = FAILED;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- return err;
-}
-
-/**
- * ufshcd_get_max_icc_level - calculate the ICC level
- * @sup_curr_uA: max. current supported by the regulator
- * @start_scan: row of the desc table to start the scan from
- * @buff: power descriptor buffer
- *
- * Returns calculated max ICC level for specific regulator
- */
-static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
-{
- int i;
- int curr_uA;
- u16 data;
- u16 unit;
-
- for (i = start_scan; i >= 0; i--) {
- data = be16_to_cpup((__be16 *)&buff[2 * i]);
- unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
- ATTR_ICC_LVL_UNIT_OFFSET;
- curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
- switch (unit) {
- case UFSHCD_NANO_AMP:
- curr_uA = curr_uA / 1000;
- break;
- case UFSHCD_MILI_AMP:
- curr_uA = curr_uA * 1000;
- break;
- case UFSHCD_AMP:
- curr_uA = curr_uA * 1000 * 1000;
- break;
- case UFSHCD_MICRO_AMP:
- default:
- break;
- }
- if (sup_curr_uA >= curr_uA)
- break;
- }
- if (i < 0) {
- i = 0;
- pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
- }
-
- return (u32)i;
-}
-
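-/*
- * Illustrative sketch, not part of the original driver: the per-entry unit
- * normalization used above, isolated. Every descriptor entry is converted
- * to microamps before being compared with the regulator limit. The helper
- * name is hypothetical.
- */
-static int icc_entry_to_uA_sketch(u16 data)
-{
- int curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
-
- switch ((data & ATTR_ICC_LVL_UNIT_MASK) >> ATTR_ICC_LVL_UNIT_OFFSET) {
- case UFSHCD_NANO_AMP:
- return curr_uA / 1000;
- case UFSHCD_MILI_AMP:
- return curr_uA * 1000;
- case UFSHCD_AMP:
- return curr_uA * 1000 * 1000;
- case UFSHCD_MICRO_AMP:
- default:
- return curr_uA;
- }
-}
-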
-/**
- * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
- * @hba: per-adapter instance
- * @desc_buf: power descriptor buffer to extract ICC levels from.
- * @len: length of desc_buf
- *
- * Returns the calculated ICC level; returns 0 in case the regulators
- * are not initialized.
- */
-static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- u8 *desc_buf, int len)
-{
- u32 icc_level = 0;
-
- if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
- !hba->vreg_info.vccq2) {
- dev_err(hba->dev,
- "%s: Regulator capability was not set, actvIccLevel=%d",
- __func__, icc_level);
- goto out;
- }
-
- if (hba->vreg_info.vcc->max_uA)
- icc_level = ufshcd_get_max_icc_level(
- hba->vreg_info.vcc->max_uA,
- POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
- &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
-
- if (hba->vreg_info.vccq->max_uA)
- icc_level = ufshcd_get_max_icc_level(
- hba->vreg_info.vccq->max_uA,
- icc_level,
- &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
-
- if (hba->vreg_info.vccq2->max_uA)
- icc_level = ufshcd_get_max_icc_level(
- hba->vreg_info.vccq2->max_uA,
- icc_level,
- &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
-out:
- return icc_level;
-}
-
-static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
-{
- int ret;
- int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
- u8 *desc_buf;
- u32 icc_level;
-
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
- if (!desc_buf)
- return;
-
- ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
- desc_buf, buff_len);
- if (ret) {
- dev_err(hba->dev,
- "%s: Failed reading power descriptor.len = %d ret = %d",
- __func__, buff_len, ret);
- goto out;
- }
-
- icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
- buff_len);
- dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
-
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
-
- if (ret)
- dev_err(hba->dev,
- "%s: Failed configuring bActiveICCLevel = %d ret = %d",
- __func__, icc_level, ret);
-
-out:
- kfree(desc_buf);
-}
-
-static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
-{
- scsi_autopm_get_device(sdev);
- blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
- if (sdev->rpm_autosuspend)
- pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
- RPM_AUTOSUSPEND_DELAY_MS);
- scsi_autopm_put_device(sdev);
-}
-
-/**
- * ufshcd_scsi_add_wlus - Adds required W-LUs
- * @hba: per-adapter instance
- *
- * UFS device specification requires the UFS devices to support 4 well known
- * logical units:
- * "REPORT_LUNS" (address: 01h)
- * "UFS Device" (address: 50h)
- * "RPMB" (address: 44h)
- * "BOOT" (address: 30h)
- * UFS device's power management needs to be controlled by "POWER CONDITION"
- * field of SSU (START STOP UNIT) command. But this "power condition" field
- * will take effect only when it's sent to the "UFS device" well known
- * logical unit, hence we require the scsi_device instance to represent it in
- * order for the UFS host driver to send the SSU command for power management.
- *
- * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
- * Block) LU so user space process can control this LU. User space may also
- * want to have access to BOOT LU.
- *
- * This function adds scsi device instances for each of these well known LUs
- * (except "REPORT LUNS" LU).
- *
- * Returns zero on success (all required W-LUs are added successfully),
- * non-zero error value on failure (if adding any of the required W-LUs failed).
- */
-static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
-{
- int ret = 0;
- struct scsi_device *sdev_boot;
-
- hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
- if (IS_ERR(hba->sdev_ufs_device)) {
- ret = PTR_ERR(hba->sdev_ufs_device);
- hba->sdev_ufs_device = NULL;
- goto out;
- }
- scsi_device_put(hba->sdev_ufs_device);
-
- hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
- if (IS_ERR(hba->sdev_rpmb)) {
- ret = PTR_ERR(hba->sdev_rpmb);
- goto remove_sdev_ufs_device;
- }
- ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
- scsi_device_put(hba->sdev_rpmb);
-
- sdev_boot = __scsi_add_device(hba->host, 0, 0,
- ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
- if (IS_ERR(sdev_boot)) {
- dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
- } else {
- ufshcd_blk_pm_runtime_init(sdev_boot);
- scsi_device_put(sdev_boot);
- }
- goto out;
-
-remove_sdev_ufs_device:
- scsi_remove_device(hba->sdev_ufs_device);
-out:
- return ret;
-}
-
-static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
- u8 lun;
- u32 d_lu_wb_buf_alloc;
- u32 ext_ufs_feature;
-
- if (!ufshcd_is_wb_allowed(hba))
- return;
- /*
- * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
- * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
- * enabled
- */
- if (!(dev_info->wspecversion >= 0x310 ||
- dev_info->wspecversion == 0x220 ||
- (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
- goto wb_disabled;
-
- if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
- goto wb_disabled;
-
- ext_ufs_feature = get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
-
- if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
- goto wb_disabled;
-
- /*
- * WB may be supported but not configured while provisioning. The spec
- * says that, in dedicated wb buffer mode, at most one lun will have a
- * wb buffer configured.
- */
- dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
-
- dev_info->b_presrv_uspc_en =
- desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
-
- if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
- if (!get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
- goto wb_disabled;
- } else {
- for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
- d_lu_wb_buf_alloc = 0;
- ufshcd_read_unit_desc_param(hba,
- lun,
- UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
- (u8 *)&d_lu_wb_buf_alloc,
- sizeof(d_lu_wb_buf_alloc));
- if (d_lu_wb_buf_alloc) {
- dev_info->wb_dedicated_lu = lun;
- break;
- }
- }
-
- if (!d_lu_wb_buf_alloc)
- goto wb_disabled;
- }
- return;
-
-wb_disabled:
- hba->caps &= ~UFSHCD_CAP_WB_EN;
-}
-
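-/*
- * Illustrative sketch, not part of the original driver: the version and
- * quirk gate ufshcd_wb_probe() applies before reading any WriteBooster
- * descriptors. The helper name is hypothetical.
- */
-static bool wb_probe_allowed_sketch(u16 wspecversion, unsigned int dev_quirks)
-{
- return wspecversion >= 0x310 || wspecversion == 0x220 ||
- (dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES);
-}
-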
-static void ufshcd_temp_notif_probe(struct ufs_hba *hba, u8 *desc_buf)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
- u32 ext_ufs_feature;
- u8 mask = 0;
-
- if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
- return;
-
- ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
-
- if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
- mask |= MASK_EE_TOO_LOW_TEMP;
-
- if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
- mask |= MASK_EE_TOO_HIGH_TEMP;
-
- if (mask) {
- ufshcd_enable_ee(hba, mask);
- ufs_hwmon_probe(hba, mask);
- }
-}
-
-void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups)
-{
- struct ufs_dev_fix *f;
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- if (!fixups)
- return;
-
- for (f = fixups; f->quirk; f++) {
- if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
- f->wmanufacturerid == UFS_ANY_VENDOR) &&
- ((dev_info->model &&
- STR_PRFX_EQUAL(f->model, dev_info->model)) ||
- !strcmp(f->model, UFS_ANY_MODEL)))
- hba->dev_quirks |= f->quirk;
- }
-}
-EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
-
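-/*
- * Hedged usage sketch, not part of the original driver: a vendor host driver
- * would typically feed its own table into ufshcd_fixup_dev_quirks() from its
- * fixup_dev_quirks vop. The table contents here are illustrative only.
- */
-static struct ufs_dev_fix vendor_fixups_sketch[] = {
- UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
- UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
- END_FIX
-};
-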
-static void ufs_fixup_device_setup(struct ufs_hba *hba)
-{
- /* fix by general quirk table */
- ufshcd_fixup_dev_quirks(hba, ufs_fixups);
-
- /* allow vendors to fix quirks */
- ufshcd_vops_fixup_dev_quirks(hba);
-}
-
-static int ufs_get_device_desc(struct ufs_hba *hba)
-{
- int err;
- u8 model_index;
- u8 b_ufs_feature_sup;
- u8 *desc_buf;
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
- if (!desc_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
- hba->desc_size[QUERY_DESC_IDN_DEVICE]);
- if (err) {
- dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
- __func__, err);
- goto out;
- }
-
- /*
- * getting vendor (manufacturerID) and Bank Index in big endian
- * format
- */
- dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
- desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
-
- /* getting Specification Version in big endian format */
- dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
- desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
- b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
-
- model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
-
- if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
- (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
- bool hpb_en = false;
-
- ufshpb_get_dev_info(hba, desc_buf);
-
- if (!ufshpb_is_legacy(hba))
- err = ufshcd_query_flag_retry(hba,
- UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_HPB_EN, 0,
- &hpb_en);
-
- if (ufshpb_is_legacy(hba) || (!err && hpb_en))
- dev_info->hpb_enabled = true;
- }
-
- err = ufshcd_read_string_desc(hba, model_index,
- &dev_info->model, SD_ASCII_STD);
- if (err < 0) {
- dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
- __func__, err);
- goto out;
- }
-
- hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
- desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
-
- ufs_fixup_device_setup(hba);
-
- ufshcd_wb_probe(hba, desc_buf);
-
- ufshcd_temp_notif_probe(hba, desc_buf);
-
- /*
- * ufshcd_read_string_desc() returns the size of the string on success;
- * reset the error value.
- */
- err = 0;
-
-out:
- kfree(desc_buf);
- return err;
-}
-
-static void ufs_put_device_desc(struct ufs_hba *hba)
-{
- struct ufs_dev_info *dev_info = &hba->dev_info;
-
- kfree(dev_info->model);
- dev_info->model = NULL;
-}
-
-/**
- * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
- * @hba: per-adapter instance
- *
- * PA_TActivate parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
- * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
- * the hibern8 exit latency.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(
- RX_MIN_ACTIVATETIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_min_activatetime);
- if (ret)
- goto out;
-
- /* make sure proper unit conversion is applied */
- tuned_pa_tactivate =
- ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
- / PA_TACTIVATE_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- tuned_pa_tactivate);
-
-out:
- return ret;
-}
-
-/**
- * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
- * @hba: per-adapter instance
- *
- * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
- * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
- * This optimal value can help reduce the hibern8 exit latency.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
- u32 max_hibern8_time, tuned_pa_hibern8time;
-
- ret = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &local_tx_hibern8_time_cap);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba,
- UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
- UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
- &peer_rx_hibern8_time_cap);
- if (ret)
- goto out;
-
- max_hibern8_time = max(local_tx_hibern8_time_cap,
- peer_rx_hibern8_time_cap);
- /* make sure proper unit conversion is applied */
- tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
- / PA_HIBERN8_TIME_UNIT_US);
- ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
- tuned_pa_hibern8time);
-out:
- return ret;
-}
-
-/**
- * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
- * less than device PA_TACTIVATE time.
- * @hba: per-adapter instance
- *
- * Some UFS devices require host PA_TACTIVATE to be lower than device
- * PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
- * for such devices.
- *
- * Returns zero on success, non-zero error value on failure.
- */
-static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
-{
- int ret = 0;
- u32 granularity, peer_granularity;
- u32 pa_tactivate, peer_pa_tactivate;
- u32 pa_tactivate_us, peer_pa_tactivate_us;
- u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
- &granularity);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
- &peer_granularity);
- if (ret)
- goto out;
-
- if ((granularity < PA_GRANULARITY_MIN_VAL) ||
- (granularity > PA_GRANULARITY_MAX_VAL)) {
- dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
- __func__, granularity);
- return -EINVAL;
- }
-
- if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
- (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
- dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
- __func__, peer_granularity);
- return -EINVAL;
- }
-
- ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
- if (ret)
- goto out;
-
- ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
- &peer_pa_tactivate);
- if (ret)
- goto out;
-
- pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
- peer_pa_tactivate_us = peer_pa_tactivate *
- gran_to_us_table[peer_granularity - 1];
-
- if (pa_tactivate_us > peer_pa_tactivate_us) {
- u32 new_peer_pa_tactivate;
-
- new_peer_pa_tactivate = pa_tactivate_us /
- gran_to_us_table[peer_granularity - 1];
- new_peer_pa_tactivate++;
- ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
- new_peer_pa_tactivate);
- }
-
-out:
- return ret;
-}
-
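-/*
- * Illustrative sketch, not part of the original driver: the normalization
- * used above. PA_TACTIVATE is expressed in units selected by PA_GRANULARITY
- * (1 -> 1us, 2 -> 4us, 3 -> 8us, 4 -> 16us, 5 -> 32us, 6 -> 100us), so both
- * sides are converted to microseconds before comparing. The helper name is
- * hypothetical.
- */
-static u32 pa_tactivate_to_us_sketch(u32 pa_tactivate, u32 granularity)
-{
- static const u8 gran_to_us[] = {1, 4, 8, 16, 32, 100};
-
- return pa_tactivate * gran_to_us[granularity - 1];
-}
-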
-static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
-{
- if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
- ufshcd_tune_pa_tactivate(hba);
- ufshcd_tune_pa_hibern8time(hba);
- }
-
- ufshcd_vops_apply_dev_quirks(hba);
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
- /* set 1ms timeout for PA_TACTIVATE */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
- ufshcd_quirk_tune_host_pa_tactivate(hba);
-}
-
-static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
-{
- hba->ufs_stats.hibern8_exit_cnt = 0;
- hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
- hba->req_abort_count = 0;
-}
-
-static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
-{
- int err;
- size_t buff_len;
- u8 *desc_buf;
-
- buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
- if (!desc_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
- desc_buf, buff_len);
- if (err) {
- dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
- __func__, err);
- goto out;
- }
-
- if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
- hba->dev_info.max_lu_supported = 32;
- else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
- hba->dev_info.max_lu_supported = 8;
-
- if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
- GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
- ufshpb_get_geo_info(hba, desc_buf);
-
-out:
- kfree(desc_buf);
- return err;
-}
-
-static struct ufs_ref_clk ufs_ref_clk_freqs[] = {
- {19200000, REF_CLK_FREQ_19_2_MHZ},
- {26000000, REF_CLK_FREQ_26_MHZ},
- {38400000, REF_CLK_FREQ_38_4_MHZ},
- {52000000, REF_CLK_FREQ_52_MHZ},
- {0, REF_CLK_FREQ_INVAL},
-};
-
-static enum ufs_ref_clk_freq
-ufs_get_bref_clk_from_hz(unsigned long freq)
-{
- int i;
-
- for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
- if (ufs_ref_clk_freqs[i].freq_hz == freq)
- return ufs_ref_clk_freqs[i].val;
-
- return REF_CLK_FREQ_INVAL;
-}
-
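-/*
- * Hedged usage sketch, not part of the original driver: a 26 MHz "ref_clk"
- * rate from the device tree maps to REF_CLK_FREQ_26_MHZ via the table above;
- * rates not in the table come back as REF_CLK_FREQ_INVAL and are rejected by
- * ufshcd_parse_dev_ref_clk_freq(). The function name is hypothetical.
- */
-static enum ufs_ref_clk_freq ref_clk_lookup_example_sketch(void)
-{
- return ufs_get_bref_clk_from_hz(26000000); /* REF_CLK_FREQ_26_MHZ */
-}
-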
-void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
-{
- unsigned long freq;
-
- freq = clk_get_rate(refclk);
-
- hba->dev_ref_clk_freq =
- ufs_get_bref_clk_from_hz(freq);
-
- if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
- dev_err(hba->dev,
- "invalid ref_clk setting = %ld\n", freq);
-}
-
-static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
-{
- int err;
- u32 ref_clk;
- u32 freq = hba->dev_ref_clk_freq;
-
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
-
- if (err) {
- dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
- err);
- goto out;
- }
-
- if (ref_clk == freq)
- goto out; /* nothing to update */
-
- err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
-
- if (err) {
- dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
- ufs_ref_clk_freqs[freq].freq_hz);
- goto out;
- }
-
- dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
- ufs_ref_clk_freqs[freq].freq_hz);
-
-out:
- return err;
-}
-
-static int ufshcd_device_params_init(struct ufs_hba *hba)
-{
- bool flag;
- int ret, i;
-
- /* Init device descriptor sizes */
- for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
- hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
-
- /* Init UFS geometry descriptor related parameters */
- ret = ufshcd_device_geo_params_init(hba);
- if (ret)
- goto out;
-
- /* Check and apply UFS device quirks */
- ret = ufs_get_device_desc(hba);
- if (ret) {
- dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
- __func__, ret);
- goto out;
- }
-
- ufshcd_get_ref_clk_gating_wait(hba);
-
- if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
- hba->dev_info.f_power_on_wp_en = flag;
-
- /* Probe maximum power mode co-supported by both UFS host and device */
- if (ufshcd_get_max_pwr_mode(hba))
- dev_err(hba->dev,
- "%s: Failed getting max supported power mode\n",
- __func__);
-out:
- return ret;
-}
-
-/**
- * ufshcd_add_lus - probe and add UFS logical units
- * @hba: per-adapter instance
- */
-static int ufshcd_add_lus(struct ufs_hba *hba)
-{
- int ret;
-
- /* Add required well known logical units to scsi mid layer */
- ret = ufshcd_scsi_add_wlus(hba);
- if (ret)
- goto out;
-
- /* Initialize devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_supported(hba)) {
- memcpy(&hba->clk_scaling.saved_pwr_info.info,
- &hba->pwr_info,
- sizeof(struct ufs_pa_layer_attr));
- hba->clk_scaling.saved_pwr_info.is_valid = true;
- hba->clk_scaling.is_allowed = true;
-
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
-
- hba->clk_scaling.is_enabled = true;
- ufshcd_init_clk_scaling_sysfs(hba);
- }
-
- ufs_bsg_probe(hba);
- ufshpb_init(hba);
- scsi_scan_host(hba->host);
- pm_runtime_put_sync(hba->dev);
-
-out:
- return ret;
-}
-
-/**
- * ufshcd_probe_hba - probe hba to detect device and initialize it
- * @hba: per-adapter instance
- * @init_dev_params: whether or not to call ufshcd_device_params_init().
- *
- * Execute link-startup and verify device initialization
- */
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
-{
- int ret;
- unsigned long flags;
- ktime_t start = ktime_get();
-
- hba->ufshcd_state = UFSHCD_STATE_RESET;
-
- ret = ufshcd_link_startup(hba);
- if (ret)
- goto out;
-
- if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
- goto out;
-
- /* Debug counters initialization */
- ufshcd_clear_dbg_ufs_stats(hba);
-
- /* UniPro link is active now */
- ufshcd_set_link_active(hba);
-
- /* Verify device initialization by sending NOP OUT UPIU */
- ret = ufshcd_verify_dev_init(hba);
- if (ret)
- goto out;
-
- /* Initiate UFS initialization, and waiting until completion */
- ret = ufshcd_complete_dev_init(hba);
- if (ret)
- goto out;
-
- /*
- * Initialize the UFS device parameters used by the driver; these
- * parameters are associated with UFS descriptors.
- */
- if (init_dev_params) {
- ret = ufshcd_device_params_init(hba);
- if (ret)
- goto out;
- }
-
- ufshcd_tune_unipro_params(hba);
-
- /* UFS device is also active now */
- ufshcd_set_ufs_dev_active(hba);
- ufshcd_force_reset_auto_bkops(hba);
-
- /* Gear up to HS gear if supported */
- if (hba->max_pwr_info.is_valid) {
- /*
- * Set the right value to bRefClkFreq before attempting to
- * switch to HS gears.
- */
- if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
- ufshcd_set_dev_ref_clk(hba);
- ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret) {
- dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
- __func__, ret);
- goto out;
- }
- ufshcd_print_pwr_info(hba);
- }
-
- /*
- * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
- * and for removable UFS card as well, hence always set the parameter.
- * Note: Error handler may issue the device reset hence resetting
- * bActiveICCLevel as well so it is always safe to set this here.
- */
- ufshcd_set_active_icc_lvl(hba);
-
- ufshcd_wb_config(hba);
- if (hba->ee_usr_mask)
- ufshcd_write_ee_control(hba);
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
- ufshpb_reset(hba);
-out:
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (ret)
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- trace_ufshcd_init(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
-}
-
-/**
- * ufshcd_async_scan - asynchronous execution for probing hba
- * @data: data pointer to pass to this function
- * @cookie: cookie data
- */
-static void ufshcd_async_scan(void *data, async_cookie_t cookie)
-{
- struct ufs_hba *hba = (struct ufs_hba *)data;
- int ret;
-
- down(&hba->host_sem);
- /* Initialize hba, detect and initialize UFS device */
- ret = ufshcd_probe_hba(hba, true);
- up(&hba->host_sem);
- if (ret)
- goto out;
-
- /* Probe and add UFS logical units */
- ret = ufshcd_add_lus(hba);
-out:
- /*
- * If we failed to initialize the device or the device is not
- * present, turn off the power/clocks etc.
- */
- if (ret) {
- pm_runtime_put_sync(hba->dev);
- ufshcd_hba_exit(hba);
- }
-}
-
-static const struct attribute_group *ufshcd_driver_groups[] = {
- &ufs_sysfs_unit_descriptor_group,
- &ufs_sysfs_lun_attributes_group,
-#ifdef CONFIG_SCSI_UFS_HPB
- &ufs_sysfs_hpb_stat_group,
- &ufs_sysfs_hpb_param_group,
-#endif
- NULL,
-};
-
-static struct ufs_hba_variant_params ufs_hba_vps = {
- .hba_enable_delay_us = 1000,
- .wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
- .devfreq_profile.polling_ms = 100,
- .devfreq_profile.target = ufshcd_devfreq_target,
- .devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
- .ondemand_data.upthreshold = 70,
- .ondemand_data.downdifferential = 5,
-};
-
-static struct scsi_host_template ufshcd_driver_template = {
- .module = THIS_MODULE,
- .name = UFSHCD,
- .proc_name = UFSHCD,
- .queuecommand = ufshcd_queuecommand,
- .slave_alloc = ufshcd_slave_alloc,
- .slave_configure = ufshcd_slave_configure,
- .slave_destroy = ufshcd_slave_destroy,
- .change_queue_depth = ufshcd_change_queue_depth,
- .eh_abort_handler = ufshcd_abort,
- .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
- .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
- .this_id = -1,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = UFSHCD_CMD_PER_LUN,
- .can_queue = UFSHCD_CAN_QUEUE,
- .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
- .max_host_blocked = 1,
- .track_queue_depth = 1,
- .sdev_groups = ufshcd_driver_groups,
- .dma_boundary = PAGE_SIZE - 1,
- .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
-};
-
-static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
- int ua)
-{
- int ret;
-
- if (!vreg)
- return 0;
-
- /*
- * "set_load" operation shall be required on those regulators
- * which specifically configured current limitation. Otherwise
- * zero max_uA may cause unexpected behavior when regulator is
- * enabled or set as high power mode.
- */
- if (!vreg->max_uA)
- return 0;
-
- ret = regulator_set_load(vreg->reg, ua);
- if (ret < 0) {
- dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
- __func__, vreg->name, ua, ret);
- }
-
- return ret;
-}
-
-static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
- struct ufs_vreg *vreg)
-{
- return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
-}
-
-static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
- struct ufs_vreg *vreg)
-{
- if (!vreg)
- return 0;
-
- return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
-}
-
-static int ufshcd_config_vreg(struct device *dev,
- struct ufs_vreg *vreg, bool on)
-{
- int ret = 0;
- struct regulator *reg;
- const char *name;
- int min_uV, uA_load;
-
- BUG_ON(!vreg);
-
- reg = vreg->reg;
- name = vreg->name;
-
- if (regulator_count_voltages(reg) > 0) {
- uA_load = on ? vreg->max_uA : 0;
- ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
- if (ret)
- goto out;
-
- if (vreg->min_uV && vreg->max_uV) {
- min_uV = on ? vreg->min_uV : 0;
- ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
- if (ret)
- dev_err(dev,
- "%s: %s set voltage failed, err=%d\n",
- __func__, name, ret);
- }
- }
-out:
- return ret;
-}
-
-static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg || vreg->enabled)
- goto out;
-
- ret = ufshcd_config_vreg(dev, vreg, true);
- if (!ret)
- ret = regulator_enable(vreg->reg);
-
- if (!ret)
- vreg->enabled = true;
- else
- dev_err(dev, "%s: %s enable failed, err=%d\n",
- __func__, vreg->name, ret);
-out:
- return ret;
-}
-
-static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg || !vreg->enabled || vreg->always_on)
- goto out;
-
- ret = regulator_disable(vreg->reg);
-
- if (!ret) {
- /* ignore errors on applying disable config */
- ufshcd_config_vreg(dev, vreg, false);
- vreg->enabled = false;
- } else {
- dev_err(dev, "%s: %s disable failed, err=%d\n",
- __func__, vreg->name, ret);
- }
-out:
- return ret;
-}
-
-static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
-{
- int ret = 0;
- struct device *dev = hba->dev;
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- ret = ufshcd_toggle_vreg(dev, info->vcc, on);
- if (ret)
- goto out;
-
- ret = ufshcd_toggle_vreg(dev, info->vccq, on);
- if (ret)
- goto out;
-
- ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
-
-out:
- if (ret) {
- ufshcd_toggle_vreg(dev, info->vccq2, false);
- ufshcd_toggle_vreg(dev, info->vccq, false);
- ufshcd_toggle_vreg(dev, info->vcc, false);
- }
- return ret;
-}
-
-static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
-{
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
-}
-
-static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
-{
- int ret = 0;
-
- if (!vreg)
- goto out;
-
- vreg->reg = devm_regulator_get(dev, vreg->name);
- if (IS_ERR(vreg->reg)) {
- ret = PTR_ERR(vreg->reg);
- dev_err(dev, "%s: %s get failed, err=%d\n",
- __func__, vreg->name, ret);
- }
-out:
- return ret;
-}
-
-static int ufshcd_init_vreg(struct ufs_hba *hba)
-{
- int ret = 0;
- struct device *dev = hba->dev;
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- ret = ufshcd_get_vreg(dev, info->vcc);
- if (ret)
- goto out;
-
- ret = ufshcd_get_vreg(dev, info->vccq);
- if (!ret)
- ret = ufshcd_get_vreg(dev, info->vccq2);
-out:
- return ret;
-}
-
-static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
-{
- struct ufs_vreg_info *info = &hba->vreg_info;
-
- if (info)
- return ufshcd_get_vreg(hba->dev, info->vdd_hba);
-
- return 0;
-}
-
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
-{
- int ret = 0;
- struct ufs_clk_info *clki;
- struct list_head *head = &hba->clk_list_head;
- unsigned long flags;
- ktime_t start = ktime_get();
- bool clk_state_changed = false;
-
- if (list_empty(head))
- goto out;
-
- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
- if (ret)
- return ret;
-
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk)) {
- /*
- * Don't disable clocks which are needed
- * to keep the link active.
- */
- if (ufshcd_is_link_active(hba) &&
- clki->keep_link_active)
- continue;
-
- clk_state_changed = on ^ clki->enabled;
- if (on && !clki->enabled) {
- ret = clk_prepare_enable(clki->clk);
- if (ret) {
- dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
- __func__, clki->name, ret);
- goto out;
- }
- } else if (!on && clki->enabled) {
- clk_disable_unprepare(clki->clk);
- }
- clki->enabled = on;
- dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
- clki->name, on ? "en" : "dis");
- }
- }
-
- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
- if (ret)
- return ret;
-
-out:
- if (ret) {
- list_for_each_entry(clki, head, list) {
- if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
- clk_disable_unprepare(clki->clk);
- }
- } else if (!ret && on) {
- spin_lock_irqsave(hba->host->host_lock, flags);
- hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- }
-
- if (clk_state_changed)
- trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
- (on ? "on" : "off"),
- ktime_to_us(ktime_sub(ktime_get(), start)), ret);
- return ret;
-}
-
-static int ufshcd_init_clocks(struct ufs_hba *hba)
-{
- int ret = 0;
- struct ufs_clk_info *clki;
- struct device *dev = hba->dev;
- struct list_head *head = &hba->clk_list_head;
-
- if (list_empty(head))
- goto out;
-
- list_for_each_entry(clki, head, list) {
- if (!clki->name)
- continue;
-
- clki->clk = devm_clk_get(dev, clki->name);
- if (IS_ERR(clki->clk)) {
- ret = PTR_ERR(clki->clk);
- dev_err(dev, "%s: %s clk get failed, %d\n",
- __func__, clki->name, ret);
- goto out;
- }
-
- /*
- * Parse device ref clk freq as per device tree "ref_clk".
- * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
- * in ufshcd_alloc_host().
- */
- if (!strcmp(clki->name, "ref_clk"))
- ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
-
- if (clki->max_freq) {
- ret = clk_set_rate(clki->clk, clki->max_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->max_freq, ret);
- goto out;
- }
- clki->curr_freq = clki->max_freq;
- }
- dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
- clki->name, clk_get_rate(clki->clk));
- }
-out:
- return ret;
-}
-
-static int ufshcd_variant_hba_init(struct ufs_hba *hba)
-{
- int err = 0;
-
- if (!hba->vops)
- goto out;
-
- err = ufshcd_vops_init(hba);
- if (err)
- dev_err(hba->dev, "%s: variant %s init failed err %d\n",
- __func__, ufshcd_get_var_name(hba), err);
-out:
- return err;
-}
-
-static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
-{
- if (!hba->vops)
- return;
-
- ufshcd_vops_exit(hba);
-}
-
-static int ufshcd_hba_init(struct ufs_hba *hba)
-{
- int err;
-
- /*
- * Handle host controller power separately from the UFS device power
- * rails, as it makes it easier to control UFS host controller power
- * collapse, which is different from UFS device power collapse.
- * Also, enable the host controller power before we go ahead with the
- * rest of the initialization here.
- */
- err = ufshcd_init_hba_vreg(hba);
- if (err)
- goto out;
-
- err = ufshcd_setup_hba_vreg(hba, true);
- if (err)
- goto out;
-
- err = ufshcd_init_clocks(hba);
- if (err)
- goto out_disable_hba_vreg;
-
- err = ufshcd_setup_clocks(hba, true);
- if (err)
- goto out_disable_hba_vreg;
-
- err = ufshcd_init_vreg(hba);
- if (err)
- goto out_disable_clks;
-
- err = ufshcd_setup_vreg(hba, true);
- if (err)
- goto out_disable_clks;
-
- err = ufshcd_variant_hba_init(hba);
- if (err)
- goto out_disable_vreg;
-
- ufs_debugfs_hba_init(hba);
-
- hba->is_powered = true;
- goto out;
-
-out_disable_vreg:
- ufshcd_setup_vreg(hba, false);
-out_disable_clks:
- ufshcd_setup_clocks(hba, false);
-out_disable_hba_vreg:
- ufshcd_setup_hba_vreg(hba, false);
-out:
- return err;
-}
-
-static void ufshcd_hba_exit(struct ufs_hba *hba)
-{
- if (hba->is_powered) {
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
- if (hba->eh_wq)
- destroy_workqueue(hba->eh_wq);
- ufs_debugfs_hba_exit(hba);
- ufshcd_variant_hba_exit(hba);
- ufshcd_setup_vreg(hba, false);
- ufshcd_setup_clocks(hba, false);
- ufshcd_setup_hba_vreg(hba, false);
- hba->is_powered = false;
- ufs_put_device_desc(hba);
- }
-}
-
-/**
- * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
- * power mode
- * @hba: per adapter instance
- * @pwr_mode: device power mode to set
- *
- * Returns 0 if requested power mode is set successfully
- * Returns non-zero if failed to set the requested power mode
- */
-static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
- enum ufs_dev_pwr_mode pwr_mode)
-{
- unsigned char cmd[6] = { START_STOP };
- struct scsi_sense_hdr sshdr;
- struct scsi_device *sdp;
- unsigned long flags;
- int ret, retries;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- sdp = hba->sdev_ufs_device;
- if (sdp) {
- ret = scsi_device_get(sdp);
- if (!ret && !scsi_device_online(sdp)) {
- ret = -ENODEV;
- scsi_device_put(sdp);
- }
- } else {
- ret = -ENODEV;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (ret)
- return ret;
-
- /*
- * If scsi commands fail, the scsi mid-layer schedules scsi error-
- * handling, which would wait for host to be resumed. Since we know
- * we are functional while we are here, skip host resume in error
- * handling context.
- */
- hba->host->eh_noresume = 1;
-
- cmd[4] = pwr_mode << 4;
-
- /*
- * The current function is generally called from the power management
- * callbacks, hence set the RQF_PM flag so that it doesn't resume the
- * already suspended children.
- */
- for (retries = 3; retries > 0; --retries) {
- ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
- if (!scsi_status_is_check_condition(ret) ||
- !scsi_sense_valid(&sshdr) ||
- sshdr.sense_key != UNIT_ATTENTION)
- break;
- }
- if (ret) {
- sdev_printk(KERN_WARNING, sdp,
- "START_STOP failed for power mode: %d, result %x\n",
- pwr_mode, ret);
- if (ret > 0 && scsi_sense_valid(&sshdr))
- scsi_print_sense_hdr(sdp, NULL, &sshdr);
- }
-
- if (!ret)
- hba->curr_dev_pwr_mode = pwr_mode;
-
- scsi_device_put(sdp);
- hba->host->eh_noresume = 0;
- return ret;
-}
-
-static int ufshcd_link_state_transition(struct ufs_hba *hba,
- enum uic_link_state req_link_state,
- int check_for_bkops)
-{
- int ret = 0;
-
- if (req_link_state == hba->uic_link_state)
- return 0;
-
- if (req_link_state == UIC_LINK_HIBERN8_STATE) {
- ret = ufshcd_uic_hibern8_enter(hba);
- if (!ret) {
- ufshcd_set_link_hibern8(hba);
- } else {
- dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
- __func__, ret);
- goto out;
- }
- }
- /*
- * If autobkops is enabled, link can't be turned off because
- * turning off the link would also turn off the device, except in the
- * case of DeepSleep where the device is expected to remain powered.
- */
- else if ((req_link_state == UIC_LINK_OFF_STATE) &&
- (!check_for_bkops || !hba->auto_bkops_enabled)) {
-		/*
-		 * Let's make sure that the link is in low power mode; we are
-		 * doing this currently by putting the link in Hibern8.
-		 * Another way to put the link in low power mode is to send a
-		 * DME endpoint reset to the device and then send a DME reset
-		 * to the local UniPro. But putting the link in Hibern8 is
-		 * much faster.
-		 *
-		 * Note also that putting the link in Hibern8 is a requirement
-		 * for entering DeepSleep.
-		 */
- ret = ufshcd_uic_hibern8_enter(hba);
- if (ret) {
- dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
- __func__, ret);
- goto out;
- }
- /*
- * Change controller state to "reset state" which
- * should also put the link in off/reset state
- */
- ufshcd_hba_stop(hba);
- /*
- * TODO: Check if we need any delay to make sure that
- * controller is reset
- */
- ufshcd_set_link_off(hba);
- }
-
-out:
- return ret;
-}
-
-static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
-{
- bool vcc_off = false;
-
-	/*
-	 * It seems some UFS devices may keep drawing more than sleep current
-	 * (at least for 500us) from the UFS rails (especially from the VCCQ
-	 * rail). To avoid this situation, add a 2ms delay before putting
-	 * these UFS rails into LPM mode.
-	 */
- if (!ufshcd_is_link_active(hba) &&
- hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
- usleep_range(2000, 2100);
-
-	/*
-	 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
-	 * some power.
-	 *
-	 * If the UFS device and link are in OFF state, all power supplies
-	 * (VCC, VCCQ, VCCQ2) can be turned off if power-on write protect is
-	 * not required. If the UFS link is inactive (Hibern8 or OFF state)
-	 * and the device is in sleep state, put the VCCQ & VCCQ2 rails into
-	 * LPM mode.
-	 *
-	 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
-	 * anyway in a low power state, which would save some power.
-	 *
-	 * If WriteBooster is enabled and the device needs to flush the WB
-	 * buffer, OR if bkops status is urgent for WB, keep VCC on.
-	 */
- if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
- !hba->dev_info.is_lu_power_on_wp) {
- ufshcd_setup_vreg(hba, false);
- vcc_off = true;
- } else if (!ufshcd_is_ufs_dev_active(hba)) {
- ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
- vcc_off = true;
- if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
- ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
- ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
- }
- }
-
-	/*
-	 * Some UFS devices require a delay after the VCC power rail is
-	 * turned off.
-	 */
- if (vcc_off && hba->vreg_info.vcc &&
- hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
- usleep_range(5000, 5100);
-}
-
-#ifdef CONFIG_PM
-static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
-{
- int ret = 0;
-
- if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
- !hba->dev_info.is_lu_power_on_wp) {
- ret = ufshcd_setup_vreg(hba, true);
- } else if (!ufshcd_is_ufs_dev_active(hba)) {
- if (!ufshcd_is_link_active(hba)) {
- ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
- if (ret)
- goto vcc_disable;
- ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
- if (ret)
- goto vccq_lpm;
- }
- ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
- }
- goto out;
-
-vccq_lpm:
- ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
-vcc_disable:
- ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
-out:
- return ret;
-}
-#endif /* CONFIG_PM */
-
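-/*
- * The two helpers below toggle only the host controller rail, and only
- * when it is safe to do so: either the link is already off, or the
- * platform has opted into aggressive power collapse (see
- * ufshcd_can_aggressive_pc(), gated by UFSHCD_CAP_AGGR_POWER_COLLAPSE).
- */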
-static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
-{
- if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
- ufshcd_setup_hba_vreg(hba, false);
-}
-
-static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
-{
- if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
- ufshcd_setup_hba_vreg(hba, true);
-}
-
-static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- int ret = 0;
- int check_for_bkops;
- enum ufs_pm_level pm_lvl;
- enum ufs_dev_pwr_mode req_dev_pwr_mode;
- enum uic_link_state req_link_state;
-
- hba->pm_op_in_progress = true;
- if (pm_op != UFS_SHUTDOWN_PM) {
- pm_lvl = pm_op == UFS_RUNTIME_PM ?
- hba->rpm_lvl : hba->spm_lvl;
- req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
- req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
- } else {
- req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
- req_link_state = UIC_LINK_OFF_STATE;
- }
-
- ufshpb_suspend(hba);
-
- /*
- * If we can't transition into any of the low power modes
- * just gate the clocks.
- */
- ufshcd_hold(hba, false);
- hba->clk_gating.is_suspended = true;
-
- if (ufshcd_is_clkscaling_supported(hba))
- ufshcd_clk_scaling_suspend(hba, true);
-
- if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
- req_link_state == UIC_LINK_ACTIVE_STATE) {
- goto vops_suspend;
- }
-
- if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
- (req_link_state == hba->uic_link_state))
- goto enable_scaling;
-
-	/* UFS device & link must be active before we enter this function */
- if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
- ret = -EINVAL;
- goto enable_scaling;
- }
-
- if (pm_op == UFS_RUNTIME_PM) {
- if (ufshcd_can_autobkops_during_suspend(hba)) {
- /*
- * The device is idle with no requests in the queue,
- * allow background operations if bkops status shows
- * that performance might be impacted.
- */
- ret = ufshcd_urgent_bkops(hba);
- if (ret)
- goto enable_scaling;
- } else {
- /* make sure that auto bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
- }
- /*
- * If device needs to do BKOP or WB buffer flush during
- * Hibern8, keep device power mode as "active power mode"
- * and VCC supply.
- */
- hba->dev_info.b_rpm_dev_flush_capable =
- hba->auto_bkops_enabled ||
- (((req_link_state == UIC_LINK_HIBERN8_STATE) ||
- ((req_link_state == UIC_LINK_ACTIVE_STATE) &&
- ufshcd_is_auto_hibern8_enabled(hba))) &&
- ufshcd_wb_need_flush(hba));
- }
-
- flush_work(&hba->eeh_work);
-
- ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
- if (ret)
- goto enable_scaling;
-
- if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
- if (pm_op != UFS_RUNTIME_PM)
- /* ensure that bkops is disabled */
- ufshcd_disable_auto_bkops(hba);
-
- if (!hba->dev_info.b_rpm_dev_flush_capable) {
- ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
- if (ret)
- goto enable_scaling;
- }
- }
-
- /*
- * In the case of DeepSleep, the device is expected to remain powered
- * with the link off, so do not check for bkops.
- */
- check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
- ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
- if (ret)
- goto set_dev_active;
-
-vops_suspend:
-	/*
-	 * Call vendor specific suspend callback. As these callbacks may access
-	 * vendor specific host controller register space, call them while the
-	 * host clocks are still ON, i.e. before the clocks are turned off.
-	 */
- ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
- if (ret)
- goto set_link_active;
- goto out;
-
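-	/*
-	 * The error labels below unwind by falling through: set_link_active
-	 * re-activates the link (or resets and restores the host), then
-	 * set_dev_active restores the device power mode, then enable_scaling
-	 * re-enables clock scaling before the common exit path runs.
-	 */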
-set_link_active:
- /*
- * Device hardware reset is required to exit DeepSleep. Also, for
- * DeepSleep, the link is off so host reset and restore will be done
- * further below.
- */
- if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_device_reset(hba);
- WARN_ON(!ufshcd_is_link_off(hba));
- }
- if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
- ufshcd_set_link_active(hba);
- else if (ufshcd_is_link_off(hba))
- ufshcd_host_reset_and_restore(hba);
-set_dev_active:
- /* Can also get here needing to exit DeepSleep */
- if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_device_reset(hba);
- ufshcd_host_reset_and_restore(hba);
- }
- if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
- ufshcd_disable_auto_bkops(hba);
-enable_scaling:
- if (ufshcd_is_clkscaling_supported(hba))
- ufshcd_clk_scaling_suspend(hba, false);
-
- hba->dev_info.b_rpm_dev_flush_capable = false;
-out:
- if (hba->dev_info.b_rpm_dev_flush_capable) {
- schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
- msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
- }
-
- if (ret) {
- ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
- hba->clk_gating.is_suspended = false;
- ufshcd_release(hba);
- ufshpb_resume(hba);
- }
- hba->pm_op_in_progress = false;
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
-{
- int ret;
- enum uic_link_state old_link_state = hba->uic_link_state;
-
- hba->pm_op_in_progress = true;
-
- /*
- * Call vendor specific resume callback. As these callbacks may access
- * vendor specific host controller register space call them when the
- * host clocks are ON.
- */
- ret = ufshcd_vops_resume(hba, pm_op);
- if (ret)
- goto out;
-
- /* For DeepSleep, the only supported option is to have the link off */
- WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
-
- if (ufshcd_is_link_hibern8(hba)) {
- ret = ufshcd_uic_hibern8_exit(hba);
- if (!ret) {
- ufshcd_set_link_active(hba);
- } else {
- dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
- __func__, ret);
- goto vendor_suspend;
- }
- } else if (ufshcd_is_link_off(hba)) {
- /*
- * A full initialization of the host and the device is
- * required since the link was put to off during suspend.
- * Note, in the case of DeepSleep, the device will exit
- * DeepSleep due to device reset.
- */
- ret = ufshcd_reset_and_restore(hba);
- /*
- * ufshcd_reset_and_restore() should have already
- * set the link state as active
- */
- if (ret || !ufshcd_is_link_active(hba))
- goto vendor_suspend;
- }
-
- if (!ufshcd_is_ufs_dev_active(hba)) {
- ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
- if (ret)
- goto set_old_link_state;
- }
-
- if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
- ufshcd_enable_auto_bkops(hba);
- else
-		/*
-		 * If BKOPS is urgently needed at this moment, keep
-		 * auto-bkops enabled; otherwise disable it.
-		 */
- ufshcd_urgent_bkops(hba);
-
- if (hba->ee_usr_mask)
- ufshcd_write_ee_control(hba);
-
- if (ufshcd_is_clkscaling_supported(hba))
- ufshcd_clk_scaling_suspend(hba, false);
-
- if (hba->dev_info.b_rpm_dev_flush_capable) {
- hba->dev_info.b_rpm_dev_flush_capable = false;
- cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
- }
-
- /* Enable Auto-Hibernate if configured */
- ufshcd_auto_hibern8_enable(hba);
-
- ufshpb_resume(hba);
- goto out;
-
-set_old_link_state:
- ufshcd_link_state_transition(hba, old_link_state, 0);
-vendor_suspend:
- ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
- ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
-out:
- if (ret)
- ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
- hba->clk_gating.is_suspended = false;
- ufshcd_release(hba);
- hba->pm_op_in_progress = false;
- return ret;
-}
-
-static int ufshcd_wl_runtime_suspend(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba;
- int ret;
- ktime_t start = ktime_get();
-
- hba = shost_priv(sdev->host);
-
- ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
- if (ret)
- dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
-
- trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
-
- return ret;
-}
-
-static int ufshcd_wl_runtime_resume(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba;
- int ret = 0;
- ktime_t start = ktime_get();
-
- hba = shost_priv(sdev->host);
-
- ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
- if (ret)
- dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
-
- trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
-
- return ret;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int ufshcd_wl_suspend(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba;
- int ret = 0;
- ktime_t start = ktime_get();
-
- hba = shost_priv(sdev->host);
- down(&hba->host_sem);
-
- if (pm_runtime_suspended(dev))
- goto out;
-
- ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
- if (ret) {
- dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- up(&hba->host_sem);
- }
-
-out:
- if (!ret)
- hba->is_sys_suspended = true;
- trace_ufshcd_wl_suspend(dev_name(dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
-
- return ret;
-}
-
-static int ufshcd_wl_resume(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba;
- int ret = 0;
- ktime_t start = ktime_get();
-
- hba = shost_priv(sdev->host);
-
- if (pm_runtime_suspended(dev))
- goto out;
-
- ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
- if (ret)
- dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
-out:
- trace_ufshcd_wl_resume(dev_name(dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- if (!ret)
- hba->is_sys_suspended = false;
- up(&hba->host_sem);
- return ret;
-}
-#endif
-
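-/*
- * Shutdown hook for the UFS device W-LU, wired up via the gendrv.shutdown
- * member of ufs_dev_wlun_template below. Every LU is quiesced first so
- * that no new SCSI commands arrive while the final power-down request is
- * sent through __ufshcd_wl_suspend(UFS_SHUTDOWN_PM).
- */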
-static void ufshcd_wl_shutdown(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba;
-
- hba = shost_priv(sdev->host);
-
- down(&hba->host_sem);
- hba->shutting_down = true;
- up(&hba->host_sem);
-
- /* Turn on everything while shutting down */
- ufshcd_rpm_get_sync(hba);
- scsi_device_quiesce(sdev);
- shost_for_each_device(sdev, hba->host) {
- if (sdev == hba->sdev_ufs_device)
- continue;
- scsi_device_quiesce(sdev);
- }
- __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
-}
-
-/**
- * ufshcd_suspend - helper function for suspend operations
- * @hba: per adapter instance
- *
- * This function disables irqs, turns off clocks and puts the vreg and
- * hba-vreg into LPM mode.
- */
-static int ufshcd_suspend(struct ufs_hba *hba)
-{
- int ret;
-
- if (!hba->is_powered)
- return 0;
-	/*
-	 * Disable the host irq, as there won't be any host controller
-	 * transactions expected till resume.
-	 */
- ufshcd_disable_irq(hba);
- ret = ufshcd_setup_clocks(hba, false);
- if (ret) {
- ufshcd_enable_irq(hba);
- return ret;
- }
- if (ufshcd_is_clkgating_allowed(hba)) {
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
- hba->clk_gating.state);
- }
-
- ufshcd_vreg_set_lpm(hba);
- /* Put the host controller in low power mode if possible */
- ufshcd_hba_vreg_set_lpm(hba);
- return ret;
-}
-
-#ifdef CONFIG_PM
-/**
- * ufshcd_resume - helper function for resume operations
- * @hba: per adapter instance
- *
- * This function turns on the regulators, clocks and irqs of the hba.
- *
- * Returns 0 for success and non-zero for failure
- */
-static int ufshcd_resume(struct ufs_hba *hba)
-{
- int ret;
-
- if (!hba->is_powered)
- return 0;
-
- ufshcd_hba_vreg_set_hpm(hba);
- ret = ufshcd_vreg_set_hpm(hba);
- if (ret)
- goto out;
-
- /* Make sure clocks are enabled before accessing controller */
- ret = ufshcd_setup_clocks(hba, true);
- if (ret)
- goto disable_vreg;
-
- /* enable the host irq as host controller would be active soon */
- ufshcd_enable_irq(hba);
- goto out;
-
-disable_vreg:
- ufshcd_vreg_set_lpm(hba);
-out:
- if (ret)
- ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
- return ret;
-}
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * ufshcd_system_suspend - system suspend callback
- * @dev: Device associated with the UFS controller.
- *
- * Executed before putting the system into a sleep state in which the contents
- * of main memory are preserved.
- *
- * Returns 0 for success and non-zero for failure
- */
-int ufshcd_system_suspend(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int ret = 0;
- ktime_t start = ktime_get();
-
- if (pm_runtime_suspended(hba->dev))
- goto out;
-
- ret = ufshcd_suspend(hba);
-out:
- trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
-}
-EXPORT_SYMBOL(ufshcd_system_suspend);
-
-/**
- * ufshcd_system_resume - system resume callback
- * @dev: Device associated with the UFS controller.
- *
- * Executed after waking the system up from a sleep state in which the contents
- * of main memory were preserved.
- *
- * Returns 0 for success and non-zero for failure
- */
-int ufshcd_system_resume(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- ktime_t start = ktime_get();
- int ret = 0;
-
- if (pm_runtime_suspended(hba->dev))
- goto out;
-
- ret = ufshcd_resume(hba);
-
-out:
- trace_ufshcd_system_resume(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
-
- return ret;
-}
-EXPORT_SYMBOL(ufshcd_system_resume);
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM
-/**
- * ufshcd_runtime_suspend - runtime suspend callback
- * @dev: Device associated with the UFS controller.
- *
- * Check the description of ufshcd_suspend() function for more details.
- *
- * Returns 0 for success and non-zero for failure
- */
-int ufshcd_runtime_suspend(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int ret;
- ktime_t start = ktime_get();
-
- ret = ufshcd_suspend(hba);
-
- trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
-}
-EXPORT_SYMBOL(ufshcd_runtime_suspend);
-
-/**
- * ufshcd_runtime_resume - runtime resume routine
- * @dev: Device associated with the UFS controller.
- *
- * This function brings the controller to an active state. The following
- * operations are done in this function:
- *
- * 1. Turn on all the controller related clocks
- * 2. Turn ON the VCC rail
- */
-int ufshcd_runtime_resume(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int ret;
- ktime_t start = ktime_get();
-
- ret = ufshcd_resume(hba);
-
- trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
- ktime_to_us(ktime_sub(ktime_get(), start)),
- hba->curr_dev_pwr_mode, hba->uic_link_state);
- return ret;
-}
-EXPORT_SYMBOL(ufshcd_runtime_resume);
-#endif /* CONFIG_PM */
-
-/**
- * ufshcd_shutdown - shutdown routine
- * @hba: per adapter instance
- *
- * This function turns off both the UFS device and UFS hba
- * regulators. It also disables clocks.
- *
- * Returns 0 always to allow force shutdown even in case of errors.
- */
-int ufshcd_shutdown(struct ufs_hba *hba)
-{
- if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
- goto out;
-
- pm_runtime_get_sync(hba->dev);
-
- ufshcd_suspend(hba);
-out:
- hba->is_powered = false;
- /* allow force shutdown even in case of errors */
- return 0;
-}
-EXPORT_SYMBOL(ufshcd_shutdown);
-
-/**
- * ufshcd_remove - de-allocate the SCSI host and the host memory space
- * data structures
- * @hba: per adapter instance
- */
-void ufshcd_remove(struct ufs_hba *hba)
-{
- if (hba->sdev_ufs_device)
- ufshcd_rpm_get_sync(hba);
- ufs_hwmon_remove(hba);
- ufs_bsg_remove(hba);
- ufshpb_remove(hba);
- ufs_sysfs_remove_nodes(hba->dev);
- blk_cleanup_queue(hba->tmf_queue);
- blk_mq_free_tag_set(&hba->tmf_tag_set);
- blk_cleanup_queue(hba->cmd_queue);
- scsi_remove_host(hba->host);
- /* disable interrupts */
- ufshcd_disable_intr(hba, hba->intr_mask);
- ufshcd_hba_stop(hba);
- ufshcd_hba_exit(hba);
-}
-EXPORT_SYMBOL_GPL(ufshcd_remove);
-
-/**
- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
- * @hba: pointer to Host Bus Adapter (HBA)
- */
-void ufshcd_dealloc_host(struct ufs_hba *hba)
-{
- scsi_host_put(hba->host);
-}
-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
-
-/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- * addressing capability
- * @hba: per adapter instance
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct ufs_hba *hba)
-{
- if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
- if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
- return 0;
- }
- return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
-}
-
-/**
- * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
- * @dev: pointer to device handle
- * @hba_handle: driver private handle
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
-{
- struct Scsi_Host *host;
- struct ufs_hba *hba;
- int err = 0;
-
- if (!dev) {
-		dev_err(dev,
-			"Invalid memory reference: dev is NULL\n");
- err = -ENODEV;
- goto out_error;
- }
-
- host = scsi_host_alloc(&ufshcd_driver_template,
- sizeof(struct ufs_hba));
- if (!host) {
- dev_err(dev, "scsi_host_alloc failed\n");
- err = -ENOMEM;
- goto out_error;
- }
- hba = shost_priv(host);
- hba->host = host;
- hba->dev = dev;
- hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
- hba->nop_out_timeout = NOP_OUT_TIMEOUT;
- INIT_LIST_HEAD(&hba->clk_list_head);
- spin_lock_init(&hba->outstanding_lock);
-
- *hba_handle = hba;
-
-out_error:
- return err;
-}
-EXPORT_SYMBOL(ufshcd_alloc_host);
-
-/*
- * This function exists because blk_mq_alloc_tag_set() requires a
- * queue_rq() callback to be provided.
- */
-static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *qd)
-{
- WARN_ON_ONCE(true);
- return BLK_STS_NOTSUPP;
-}
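-/*
- * Note: requests on tmf_queue are only ever allocated (their tags serve
- * as task management slot numbers), never started, so ufshcd_queue_tmf()
- * should be unreachable; the WARN above flags any unexpected dispatch.
- */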
-
-static const struct blk_mq_ops ufshcd_tmf_ops = {
- .queue_rq = ufshcd_queue_tmf,
-};
-
-/**
- * ufshcd_init - Driver initialization routine
- * @hba: per-adapter instance
- * @mmio_base: base register address
- * @irq: Interrupt line of device
- * Returns 0 on success, non-zero value on failure
- */
-int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
-{
- int err;
- struct Scsi_Host *host = hba->host;
- struct device *dev = hba->dev;
- char eh_wq_name[sizeof("ufs_eh_wq_00")];
-
- if (!mmio_base) {
-		dev_err(hba->dev,
-			"Invalid memory reference: mmio_base is NULL\n");
- err = -ENODEV;
- goto out_error;
- }
-
- hba->mmio_base = mmio_base;
- hba->irq = irq;
- hba->vps = &ufs_hba_vps;
-
- err = ufshcd_hba_init(hba);
- if (err)
- goto out_error;
-
- /* Read capabilities registers */
- err = ufshcd_hba_capabilities(hba);
- if (err)
- goto out_disable;
-
- /* Get UFS version supported by the controller */
- hba->ufs_version = ufshcd_get_ufs_version(hba);
-
- /* Get Interrupt bit mask per version */
- hba->intr_mask = ufshcd_get_intr_mask(hba);
-
- err = ufshcd_set_dma_mask(hba);
- if (err) {
- dev_err(hba->dev, "set dma mask failed\n");
- goto out_disable;
- }
-
- /* Allocate memory for host memory space */
- err = ufshcd_memory_alloc(hba);
- if (err) {
- dev_err(hba->dev, "Memory allocation failed\n");
- goto out_disable;
- }
-
- /* Configure LRB */
- ufshcd_host_memory_configure(hba);
-
- host->can_queue = hba->nutrs;
- host->cmd_per_lun = hba->nutrs;
- host->max_id = UFSHCD_MAX_ID;
- host->max_lun = UFS_MAX_LUNS;
- host->max_channel = UFSHCD_MAX_CHANNEL;
- host->unique_id = host->host_no;
- host->max_cmd_len = UFS_CDB_SIZE;
-
- hba->max_pwr_info.is_valid = false;
-
- /* Initialize work queues */
- snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
- hba->host->host_no);
- hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
- if (!hba->eh_wq) {
- dev_err(hba->dev, "%s: failed to create eh workqueue\n",
- __func__);
- err = -ENOMEM;
- goto out_disable;
- }
- INIT_WORK(&hba->eh_work, ufshcd_err_handler);
- INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
-
- sema_init(&hba->host_sem, 1);
-
- /* Initialize UIC command mutex */
- mutex_init(&hba->uic_cmd_mutex);
-
- /* Initialize mutex for device management commands */
- mutex_init(&hba->dev_cmd.lock);
-
- /* Initialize mutex for exception event control */
- mutex_init(&hba->ee_ctrl_mutex);
-
- init_rwsem(&hba->clk_scaling_lock);
-
- ufshcd_init_clk_gating(hba);
-
- ufshcd_init_clk_scaling(hba);
-
- /*
- * In order to avoid any spurious interrupt immediately after
- * registering UFS controller interrupt handler, clear any pending UFS
- * interrupt status and disable all the UFS interrupts.
- */
- ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
- REG_INTERRUPT_STATUS);
- ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
- /*
- * Make sure that UFS interrupts are disabled and any pending interrupt
- * status is cleared before registering UFS interrupt handler.
- */
- mb();
-
- /* IRQ registration */
- err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
- if (err) {
- dev_err(hba->dev, "request irq failed\n");
- goto out_disable;
- } else {
- hba->is_irq_enabled = true;
- }
-
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
- }
-
- hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
- if (IS_ERR(hba->cmd_queue)) {
- err = PTR_ERR(hba->cmd_queue);
- goto out_remove_scsi_host;
- }
-
- hba->tmf_tag_set = (struct blk_mq_tag_set) {
- .nr_hw_queues = 1,
- .queue_depth = hba->nutmrs,
- .ops = &ufshcd_tmf_ops,
- .flags = BLK_MQ_F_NO_SCHED,
- };
- err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
- if (err < 0)
- goto free_cmd_queue;
- hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
- if (IS_ERR(hba->tmf_queue)) {
- err = PTR_ERR(hba->tmf_queue);
- goto free_tmf_tag_set;
- }
- hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
- sizeof(*hba->tmf_rqs), GFP_KERNEL);
- if (!hba->tmf_rqs) {
- err = -ENOMEM;
- goto free_tmf_queue;
- }
-
- /* Reset the attached device */
- ufshcd_device_reset(hba);
-
- ufshcd_init_crypto(hba);
-
- /* Host controller enable */
- err = ufshcd_hba_enable(hba);
- if (err) {
- dev_err(hba->dev, "Host controller enable failed\n");
- ufshcd_print_evt_hist(hba);
- ufshcd_print_host_state(hba);
- goto free_tmf_queue;
- }
-
- /*
- * Set the default power management level for runtime and system PM.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
- INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
- ufshcd_rpm_dev_flush_recheck_work);
-
-	/* Set the default auto-hibernate idle timer value to 150 ms */
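-	/*
-	 * This encoding assumes the UFSHCI AHIT register layout: a timer
-	 * value of 150 combined with a scale field of 3 (1 ms units) yields
-	 * the 150 ms default.
-	 */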
- if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
- hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
- FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
- }
-
- /* Hold auto suspend until async scan completes */
- pm_runtime_get_sync(dev);
- atomic_set(&hba->scsi_block_reqs_cnt, 0);
-	/*
-	 * We are assuming that the device wasn't put into a sleep/power-down
-	 * state during the boot stage, i.e. before the kernel took over.
-	 * This assumption helps avoid doing link startup twice during
-	 * ufshcd_probe_hba().
-	 */
- ufshcd_set_ufs_dev_active(hba);
-
- async_schedule(ufshcd_async_scan, hba);
- ufs_sysfs_add_nodes(hba->dev);
-
- device_enable_async_suspend(dev);
- return 0;
-
-free_tmf_queue:
- blk_cleanup_queue(hba->tmf_queue);
-free_tmf_tag_set:
- blk_mq_free_tag_set(&hba->tmf_tag_set);
-free_cmd_queue:
- blk_cleanup_queue(hba->cmd_queue);
-out_remove_scsi_host:
- scsi_remove_host(hba->host);
-out_disable:
- hba->is_irq_enabled = false;
- ufshcd_hba_exit(hba);
-out_error:
- return err;
-}
-EXPORT_SYMBOL_GPL(ufshcd_init);
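-/*
- * For reference, a minimal glue driver would pair the entry points above
- * roughly as follows (a sketch with error handling elided; real callers
- * such as ufshcd-pltfrm also parse clocks and regulators first):
- *
- *	struct ufs_hba *hba;
- *	int err;
- *
- *	err = ufshcd_alloc_host(dev, &hba);
- *	if (err)
- *		return err;
- *	err = ufshcd_init(hba, mmio_base, irq);
- *	if (err)
- *		ufshcd_dealloc_host(hba);
- *	return err;
- */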
-
-void ufshcd_resume_complete(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
-
- if (hba->complete_put) {
- ufshcd_rpm_put(hba);
- hba->complete_put = false;
- }
-}
-EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
-
-int ufshcd_suspend_prepare(struct device *dev)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- int ret;
-
-	/*
-	 * SCSI assumes that runtime-pm and system-pm for scsi drivers are
-	 * the same. And it doesn't wake up the device for system-suspend
-	 * if it's runtime suspended. But ufs doesn't follow that.
-	 * Refer to ufshcd_resume_complete().
-	 */
- if (hba->sdev_ufs_device) {
- ret = ufshcd_rpm_get_sync(hba);
- if (ret < 0 && ret != -EACCES) {
- ufshcd_rpm_put(hba);
- return ret;
- }
- hba->complete_put = true;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
-
-#ifdef CONFIG_PM_SLEEP
-static int ufshcd_wl_poweroff(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufs_hba *hba = shost_priv(sdev->host);
-
- __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
- return 0;
-}
-#endif
-
-static int ufshcd_wl_probe(struct device *dev)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
-
- if (!is_device_wlun(sdev))
- return -ENODEV;
-
- blk_pm_runtime_init(sdev->request_queue, dev);
- pm_runtime_set_autosuspend_delay(dev, 0);
- pm_runtime_allow(dev);
-
- return 0;
-}
-
-static int ufshcd_wl_remove(struct device *dev)
-{
- pm_runtime_forbid(dev);
- return 0;
-}
-
-static const struct dev_pm_ops ufshcd_wl_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = ufshcd_wl_suspend,
- .resume = ufshcd_wl_resume,
- .freeze = ufshcd_wl_suspend,
- .thaw = ufshcd_wl_resume,
- .poweroff = ufshcd_wl_poweroff,
- .restore = ufshcd_wl_resume,
-#endif
- SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
-};
-
-/*
- * ufs_dev_wlun_template - describes the ufs device wlun
- * ufs-device wlun - used to send pm commands
- * All luns are consumers of the ufs-device wlun.
- *
- * Currently, no sd driver is present for wluns.
- * Hence no specific pm operations are performed.
- * With the ufs design, SSU should be sent to the ufs-device wlun.
- * Hence register a scsi driver for ufs wluns only.
- */
-static struct scsi_driver ufs_dev_wlun_template = {
- .gendrv = {
- .name = "ufs_device_wlun",
- .owner = THIS_MODULE,
- .probe = ufshcd_wl_probe,
- .remove = ufshcd_wl_remove,
- .pm = &ufshcd_wl_pm_ops,
- .shutdown = ufshcd_wl_shutdown,
- },
-};
-
-static int __init ufshcd_core_init(void)
-{
- int ret;
-
- /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
- static_assert(sizeof(struct utp_transfer_cmd_desc) ==
- 2 * ALIGNED_UPIU_SIZE +
- SG_ALL * sizeof(struct ufshcd_sg_entry));
-
- ufs_debugfs_init();
-
- ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
- if (ret)
- ufs_debugfs_exit();
- return ret;
-}
-
-static void __exit ufshcd_core_exit(void)
-{
- ufs_debugfs_exit();
- scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
-}
-
-module_init(ufshcd_core_init);
-module_exit(ufshcd_core_exit);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
-MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("Generic UFS host controller driver Core");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
deleted file mode 100644
index 54750d72c8fb..000000000000
--- a/drivers/scsi/ufs/ufshcd.h
+++ /dev/null
@@ -1,1428 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Universal Flash Storage Host controller driver
- * Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#ifndef _UFSHCD_H
-#define _UFSHCD_H
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/rwsem.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/regulator/consumer.h>
-#include <linux/bitfield.h>
-#include <linux/devfreq.h>
-#include <linux/blk-crypto-profile.h>
-#include "unipro.h"
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
-#include "ufs.h"
-#include "ufs_quirks.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.2"
-
-struct ufs_hba;
-
-enum dev_cmd_type {
- DEV_CMD_TYPE_NOP = 0x0,
- DEV_CMD_TYPE_QUERY = 0x1,
-};
-
-enum ufs_event_type {
- /* uic specific errors */
- UFS_EVT_PA_ERR = 0,
- UFS_EVT_DL_ERR,
- UFS_EVT_NL_ERR,
- UFS_EVT_TL_ERR,
- UFS_EVT_DME_ERR,
-
- /* fatal errors */
- UFS_EVT_AUTO_HIBERN8_ERR,
- UFS_EVT_FATAL_ERR,
- UFS_EVT_LINK_STARTUP_FAIL,
- UFS_EVT_RESUME_ERR,
- UFS_EVT_SUSPEND_ERR,
- UFS_EVT_WL_SUSP_ERR,
- UFS_EVT_WL_RES_ERR,
-
- /* abnormal events */
- UFS_EVT_DEV_RESET,
- UFS_EVT_HOST_RESET,
- UFS_EVT_ABORT,
-
- UFS_EVT_CNT,
-};
-
-/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @done: UIC command completion
- */
-struct uic_command {
- u32 command;
- u32 argument1;
- u32 argument2;
- u32 argument3;
- int cmd_active;
- struct completion done;
-};
-
-/* Used to differentiate the power management options */
-enum ufs_pm_op {
- UFS_RUNTIME_PM,
- UFS_SYSTEM_PM,
- UFS_SHUTDOWN_PM,
-};
-
-/* Host <-> Device UniPro Link state */
-enum uic_link_state {
- UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
- UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
- UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
- UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */
-};
-
-#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
-#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
- UIC_LINK_ACTIVE_STATE)
-#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
- UIC_LINK_HIBERN8_STATE)
-#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
- UIC_LINK_BROKEN_STATE)
-#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
-#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
- UIC_LINK_ACTIVE_STATE)
-#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
- UIC_LINK_HIBERN8_STATE)
-#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
- UIC_LINK_BROKEN_STATE)
-
-#define ufshcd_set_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
-#define ufshcd_set_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
-#define ufshcd_set_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
-#define ufshcd_set_ufs_dev_deepsleep(h) \
- ((h)->curr_dev_pwr_mode = UFS_DEEPSLEEP_PWR_MODE)
-#define ufshcd_is_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
-#define ufshcd_is_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
-#define ufshcd_is_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
-#define ufshcd_is_ufs_dev_deepsleep(h) \
- ((h)->curr_dev_pwr_mode == UFS_DEEPSLEEP_PWR_MODE)
-
-/*
- * UFS Power management levels.
- * Each level is in increasing order of power savings, except DeepSleep,
- * which consumes less power than PowerDown with power on, but not less
- * than PowerDown with power off.
- */
-enum ufs_pm_level {
- UFS_PM_LVL_0,
- UFS_PM_LVL_1,
- UFS_PM_LVL_2,
- UFS_PM_LVL_3,
- UFS_PM_LVL_4,
- UFS_PM_LVL_5,
- UFS_PM_LVL_6,
- UFS_PM_LVL_MAX
-};
-
-struct ufs_pm_lvl_states {
- enum ufs_dev_pwr_mode dev_state;
- enum uic_link_state link_state;
-};
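-/*
- * For orientation: the ufs_pm_lvl_states[] table in ufshcd.c maps the
- * levels above roughly as follows (device power mode, link state):
- * lvl 0 = (active, active), lvl 1 = (active, hibern8),
- * lvl 2 = (sleep, active), lvl 3 = (sleep, hibern8),
- * lvl 4 = (powerdown, hibern8), lvl 5 = (powerdown, off),
- * lvl 6 = (deepsleep, off).
- */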
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_req_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @utrd_dma_addr: UTRD dma address for debug
- * @ucd_prdt_dma_addr: PRDT dma address for debug
- * @ucd_rsp_dma_addr: UPIU response dma address for debug
- * @ucd_req_dma_addr: UPIU request dma address for debug
- * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
- * @issue_time_stamp: time stamp for debug purposes
- * @compl_time_stamp: time stamp for statistics
- * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
- * @data_unit_num: the data unit number for the first block for inline crypto
- * @req_abort_skip: skip request abort task flag
- */
-struct ufshcd_lrb {
- struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_req *ucd_req_ptr;
- struct utp_upiu_rsp *ucd_rsp_ptr;
- struct ufshcd_sg_entry *ucd_prdt_ptr;
-
- dma_addr_t utrd_dma_addr;
- dma_addr_t ucd_req_dma_addr;
- dma_addr_t ucd_rsp_dma_addr;
- dma_addr_t ucd_prdt_dma_addr;
-
- struct scsi_cmnd *cmd;
- u8 *sense_buffer;
- unsigned int sense_bufflen;
- int scsi_status;
-
- int command_type;
- int task_tag;
- u8 lun; /* UPIU LUN id field is only 8-bit wide */
- bool intr_cmd;
- ktime_t issue_time_stamp;
- ktime_t compl_time_stamp;
-#ifdef CONFIG_SCSI_UFS_CRYPTO
- int crypto_key_slot;
- u64 data_unit_num;
-#endif
-
- bool req_abort_skip;
-};
-
-/**
- * struct ufs_query - holds relevant data structures for query request
- * @request: request upiu and function
- * @descriptor: buffer for sending/receiving descriptor
- * @response: response upiu and response
- */
-struct ufs_query {
- struct ufs_query_req request;
- u8 *descriptor;
- struct ufs_query_res response;
-};
-
-/**
- * struct ufs_dev_cmd - all fields associated with device management commands
- * @type: device management command type - Query, NOP OUT
- * @lock: lock to allow one command at a time
- * @complete: internal commands completion
- */
-struct ufs_dev_cmd {
- enum dev_cmd_type type;
- struct mutex lock;
- struct completion *complete;
- struct ufs_query query;
-};
-
-/**
- * struct ufs_clk_info - UFS clock related info
- * @list: list headed by hba->clk_list_head
- * @clk: clock node
- * @name: clock name
- * @max_freq: maximum frequency supported by the clock
- * @min_freq: min frequency that can be used for clock scaling
- * @curr_freq: indicates the current frequency that it is set to
- * @keep_link_active: indicates that the clk should not be disabled if
- *		      link is active
- * @enabled: variable to check against multiple enable/disable
- */
-struct ufs_clk_info {
- struct list_head list;
- struct clk *clk;
- const char *name;
- u32 max_freq;
- u32 min_freq;
- u32 curr_freq;
- bool keep_link_active;
- bool enabled;
-};
-
-enum ufs_notify_change_status {
- PRE_CHANGE,
- POST_CHANGE,
-};
-
-struct ufs_pa_layer_attr {
- u32 gear_rx;
- u32 gear_tx;
- u32 lane_rx;
- u32 lane_tx;
- u32 pwr_rx;
- u32 pwr_tx;
- u32 hs_rate;
-};
-
-struct ufs_pwr_mode_info {
- bool is_valid;
- struct ufs_pa_layer_attr info;
-};
-
-/**
- * struct ufs_hba_variant_ops - variant specific callbacks
- * @name: variant name
- * @init: called when the driver is initialized
- * @exit: called to cleanup everything done in init
- * @get_ufs_hci_version: called to get UFS HCI version
- * @clk_scale_notify: notifies that clks are scaled up/down
- * @setup_clocks: called before touching any of the controller registers
- * @hce_enable_notify: called before and after HCE enable bit is set to allow
- * variant specific Uni-Pro initialization.
- * @link_startup_notify: called before and after Link startup is carried out
- * to allow variant specific Uni-Pro initialization.
- * @pwr_change_notify: called before and after a power mode change
- * is carried out to allow vendor specific capabilities
- * to be set.
- * @setup_xfer_req: called before any transfer request is issued
- * to set some things
- * @setup_task_mgmt: called before any task management request is issued
- * to set some things
- * @hibern8_notify: called around hibern8 enter/exit
- * @apply_dev_quirks: called to apply device specific quirks
- * @suspend: called during host controller PM callback
- * @resume: called during host controller PM callback
- * @dbg_register_dump: used to dump controller debug information
- * @phy_initialization: used to initialize phys
- * @device_reset: called to issue a reset pulse on the UFS device
- * @program_key: program or evict an inline encryption key
- * @event_notify: called to notify important events
- */
-struct ufs_hba_variant_ops {
- const char *name;
- int (*init)(struct ufs_hba *);
- void (*exit)(struct ufs_hba *);
- u32 (*get_ufs_hci_version)(struct ufs_hba *);
- int (*clk_scale_notify)(struct ufs_hba *, bool,
- enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool,
- enum ufs_notify_change_status);
- int (*hce_enable_notify)(struct ufs_hba *,
- enum ufs_notify_change_status);
- int (*link_startup_notify)(struct ufs_hba *,
- enum ufs_notify_change_status);
- int (*pwr_change_notify)(struct ufs_hba *,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *,
- struct ufs_pa_layer_attr *);
- void (*setup_xfer_req)(struct ufs_hba *, int, bool);
- void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
- void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
- enum ufs_notify_change_status);
- int (*apply_dev_quirks)(struct ufs_hba *hba);
- void (*fixup_dev_quirks)(struct ufs_hba *hba);
- int (*suspend)(struct ufs_hba *, enum ufs_pm_op,
- enum ufs_notify_change_status);
- int (*resume)(struct ufs_hba *, enum ufs_pm_op);
- void (*dbg_register_dump)(struct ufs_hba *hba);
- int (*phy_initialization)(struct ufs_hba *);
- int (*device_reset)(struct ufs_hba *hba);
- void (*config_scaling_param)(struct ufs_hba *hba,
- struct devfreq_dev_profile *profile,
- void *data);
- int (*program_key)(struct ufs_hba *hba,
- const union ufs_crypto_cfg_entry *cfg, int slot);
- void (*event_notify)(struct ufs_hba *hba,
- enum ufs_event_type evt, void *data);
-};
-
-/* clock gating state */
-enum clk_gating_state {
- CLKS_OFF,
- CLKS_ON,
- REQ_CLKS_OFF,
- REQ_CLKS_ON,
-};
-
-/**
- * struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
- * @ungate_work: worker to turn on clocks that will be used in case of
- * interrupt context
- * @state: the current clocks state
- * @delay_ms: gating delay in ms
- * @is_suspended: clk gating is suspended when set to 1 which can be used
- * during suspend/resume
- * @delay_attr: sysfs attribute to control delay_ms
- * @enable_attr: sysfs attribute to enable/disable clock gating
- * @is_enabled: Indicates the current status of clock gating
- * @is_initialized: Indicates whether clock gating is initialized or not
- * @active_reqs: number of requests that are pending and should be waited for
- * completion before gating clocks.
- */
-struct ufs_clk_gating {
- struct delayed_work gate_work;
- struct work_struct ungate_work;
- enum clk_gating_state state;
- unsigned long delay_ms;
- bool is_suspended;
- struct device_attribute delay_attr;
- struct device_attribute enable_attr;
- bool is_enabled;
- bool is_initialized;
- int active_reqs;
- struct workqueue_struct *clk_gating_workq;
-};
-
-struct ufs_saved_pwr_info {
- struct ufs_pa_layer_attr info;
- bool is_valid;
-};
-
-/**
- * struct ufs_clk_scaling - UFS clock scaling related data
- * @active_reqs: number of requests that are pending. If this is zero when
- * devfreq ->target() function is called then schedule "suspend_work" to
- * suspend devfreq.
- * @tot_busy_t: Total busy time in current polling window
- * @window_start_t: Start time (in jiffies) of the current polling window
- * @busy_start_t: Start time of current busy period
- * @enable_attr: sysfs attribute to enable/disable clock scaling
- * @saved_pwr_info: UFS power mode may also be changed during scaling and this
- * one keeps track of previous power mode.
- * @workq: workqueue to schedule devfreq suspend/resume work
- * @suspend_work: worker to suspend devfreq
- * @resume_work: worker to resume devfreq
- * @min_gear: lowest HS gear to scale down to
- * @is_enabled: tracks if scaling is currently enabled or not, controlled by
- *		clkscale_enable sysfs node
- * @is_allowed: tracks if scaling is currently allowed or not, used to block
- *		clock scaling which is not invoked from devfreq governor
- * @is_initialized: Indicates whether clock scaling is initialized or not
- * @is_busy_started: tracks if busy period has started or not
- * @is_suspended: tracks if devfreq is suspended or not
- */
-struct ufs_clk_scaling {
- int active_reqs;
- unsigned long tot_busy_t;
- ktime_t window_start_t;
- ktime_t busy_start_t;
- struct device_attribute enable_attr;
- struct ufs_saved_pwr_info saved_pwr_info;
- struct workqueue_struct *workq;
- struct work_struct suspend_work;
- struct work_struct resume_work;
- u32 min_gear;
- bool is_enabled;
- bool is_allowed;
- bool is_initialized;
- bool is_busy_started;
- bool is_suspended;
-};
-
-#define UFS_EVENT_HIST_LENGTH 8
-/**
- * struct ufs_event_hist - keeps history of errors
- * @pos: index to indicate cyclic buffer position
- * @val: cyclic buffer for register values
- * @tstamp: cyclic buffer for time stamp
- * @cnt: error counter
- */
-struct ufs_event_hist {
- int pos;
- u32 val[UFS_EVENT_HIST_LENGTH];
- ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
- unsigned long long cnt;
-};
-
-/**
- * struct ufs_stats - keeps usage/err statistics
- * @last_intr_status: record the last interrupt status.
- * @last_intr_ts: record the last interrupt timestamp.
- * @hibern8_exit_cnt: Counter to keep track of number of exits,
- * reset this after link-startup.
- * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
- * Clear after the first successful command completion.
- */
-struct ufs_stats {
- u32 last_intr_status;
- ktime_t last_intr_ts;
-
- u32 hibern8_exit_cnt;
- ktime_t last_hibern8_exit_tstamp;
- struct ufs_event_hist event[UFS_EVT_CNT];
-};
-
-/**
- * enum ufshcd_state - UFS host controller state
- * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command
- * processing.
- * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process
- * SCSI commands.
- * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled.
- * SCSI commands may be submitted to the controller.
- * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail
- * newly submitted SCSI commands with error code DID_BAD_TARGET.
- * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery
- * failed. Fail all SCSI commands with error code DID_ERROR.
- */
-enum ufshcd_state {
- UFSHCD_STATE_RESET,
- UFSHCD_STATE_OPERATIONAL,
- UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
- UFSHCD_STATE_EH_SCHEDULED_FATAL,
- UFSHCD_STATE_ERROR,
-};
-
-enum ufshcd_quirks {
- /* Interrupt aggregation support is broken */
- UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0,
-
- /*
- * delay before each dme command is required as the unipro
- * layer has shown instabilities
- */
- UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS = 1 << 1,
-
- /*
- * If UFS host controller is having issue in processing LCC (Line
- * Control Command) coming from device then enable this quirk.
- * When this quirk is enabled, host controller driver should disable
- * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
- * attribute of device to 0).
- */
- UFSHCD_QUIRK_BROKEN_LCC = 1 << 2,
-
- /*
- * The attribute PA_RXHSUNTERMCAP specifies whether or not the
- * inbound Link supports unterminated line in HS mode. Setting this
- * attribute to 1 fixes moving to HS gear.
- */
- UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP = 1 << 3,
-
- /*
- * This quirk needs to be enabled if the host controller only allows
- * accessing the peer dme attributes in AUTO mode (FAST AUTO or
- * SLOW AUTO).
- */
- UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE = 1 << 4,
-
- /*
- * This quirk needs to be enabled if the host controller doesn't
- * advertise the correct version in UFS_VER register. If this quirk
- * is enabled, standard UFS host driver will call the vendor specific
- * ops (get_ufs_hci_version) to get the correct version.
- */
- UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION = 1 << 5,
-
- /*
- * Clear handling for transfer/task request list is just opposite.
- */
- UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR = 1 << 6,
-
- /*
- * This quirk needs to be enabled if host controller doesn't allow
- * that the interrupt aggregation timer and counter are reset by s/w.
- */
- UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR = 1 << 7,
-
- /*
-	 * This quirk needs to be enabled if the host controller cannot be
- * enabled via HCE register.
- */
- UFSHCI_QUIRK_BROKEN_HCE = 1 << 8,
-
- /*
-	 * This quirk needs to be enabled if the host controller treats the
-	 * values of PRDTO and PRDTL in the UTRD as byte-granular.
- */
- UFSHCD_QUIRK_PRDT_BYTE_GRAN = 1 << 9,
-
- /*
- * This quirk needs to be enabled if the host controller reports
- * OCS FATAL ERROR with device error through sense data
- */
- UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR = 1 << 10,
-
- /*
- * This quirk needs to be enabled if the host controller has
- * auto-hibernate capability but it doesn't work.
- */
- UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
-
-	/*
-	 * This quirk needs to be enabled to disable manual flush for
-	 * WriteBooster
-	 */
- UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
-
-	/*
-	 * This quirk needs to be enabled to skip setting the default
-	 * unipro timeout values before a power mode change
-	 */
- UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
-
- /*
- * This quirk allows only sg entries aligned with page size.
- */
- UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
-
- /*
- * This quirk needs to be enabled if the host controller does not
- * support UIC command
- */
- UFSHCD_QUIRK_BROKEN_UIC_CMD = 1 << 15,
-
- /*
- * This quirk needs to be enabled if the host controller cannot
- * support physical host configuration.
- */
- UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16,
-};
-
-enum ufshcd_caps {
- /* Allow dynamic clk gating */
- UFSHCD_CAP_CLK_GATING = 1 << 0,
-
-	/* Allow hibern8 with clk gating */
- UFSHCD_CAP_HIBERN8_WITH_CLK_GATING = 1 << 1,
-
- /* Allow dynamic clk scaling */
- UFSHCD_CAP_CLK_SCALING = 1 << 2,
-
-	/* Allow auto bkops to be enabled during runtime suspend */
- UFSHCD_CAP_AUTO_BKOPS_SUSPEND = 1 << 3,
-
- /*
- * This capability allows host controller driver to use the UFS HCI's
- * interrupt aggregation capability.
- * CAUTION: Enabling this might reduce overall UFS throughput.
- */
- UFSHCD_CAP_INTR_AGGR = 1 << 4,
-
- /*
- * This capability allows the device auto-bkops to be always enabled
- * except during suspend (both runtime and suspend).
- * Enabling this capability means that device will always be allowed
- * to do background operation when it's active but it might degrade
- * the performance of ongoing read/write operations.
- */
- UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND = 1 << 5,
-
- /*
- * This capability allows host controller driver to automatically
- * enable runtime power management by itself instead of waiting
- * for userspace to control the power management.
- */
- UFSHCD_CAP_RPM_AUTOSUSPEND = 1 << 6,
-
- /*
-	 * This capability allows the host controller driver to turn on
- * WriteBooster, if the underlying device supports it and is
- * provisioned to be used. This would increase the write performance.
- */
- UFSHCD_CAP_WB_EN = 1 << 7,
-
- /*
- * This capability allows the host controller driver to use the
- * inline crypto engine, if it is present
- */
- UFSHCD_CAP_CRYPTO = 1 << 8,
-
- /*
- * This capability allows the controller regulators to be put into
- * lpm mode aggressively during clock gating.
- * This would increase power savings.
- */
- UFSHCD_CAP_AGGR_POWER_COLLAPSE = 1 << 9,
-
- /*
- * This capability allows the host controller driver to use DeepSleep,
- * if it is supported by the UFS device. The host controller driver must
- * support device hardware reset via the hba->device_reset() callback,
- * in order to exit DeepSleep state.
- */
- UFSHCD_CAP_DEEPSLEEP = 1 << 10,
-
- /*
- * This capability allows the host controller driver to use temperature
- * notification if it is supported by the UFS device.
- */
- UFSHCD_CAP_TEMP_NOTIF = 1 << 11,
-};
-
-struct ufs_hba_variant_params {
- struct devfreq_dev_profile devfreq_profile;
- struct devfreq_simple_ondemand_data ondemand_data;
- u16 hba_enable_delay_us;
- u32 wb_flush_threshold;
-};
-
-#ifdef CONFIG_SCSI_UFS_HPB
-/**
- * struct ufshpb_dev_info - UFSHPB device related info
- * @num_lu: the number of user logical units, used to check whether all
- * lus finished initialization
- * @rgn_size: device reported HPB region size
- * @srgn_size: device reported HPB sub-region size
- * @slave_conf_cnt: counter to check all lu finished initialization
- * @hpb_disabled: flag to check if HPB is disabled
- * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
- * @is_legacy: flag to check HPB 1.0
- * @control_mode: either host or device
- */
-struct ufshpb_dev_info {
- int num_lu;
- int rgn_size;
- int srgn_size;
- atomic_t slave_conf_cnt;
- bool hpb_disabled;
- u8 max_hpb_single_cmd;
- bool is_legacy;
- u8 control_mode;
-};
-#endif
-
-struct ufs_hba_monitor {
- unsigned long chunk_size;
-
- unsigned long nr_sec_rw[2];
- ktime_t total_busy[2];
-
- unsigned long nr_req[2];
-	/* latencies */
- ktime_t lat_sum[2];
- ktime_t lat_max[2];
- ktime_t lat_min[2];
-
- u32 nr_queued[2];
- ktime_t busy_start_ts[2];
-
- ktime_t enabled_ts;
- bool enabled;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @dev: device handle
- * @lrb: local reference block
- * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_lock: Protects @outstanding_reqs.
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @ufs_version: UFS Version to which controller complies
- * @vops: pointer to variant specific operations
- * @priv: pointer to variant specific private data
- * @irq: Irq number of the controller
- * @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for UIC command
- * @tmf_tag_set: TMF tag set.
- * @tmf_queue: Used to allocate TMF tags.
- * @pwr_done: completion for power mode change
- * @ufshcd_state: UFSHCD state
- * @eh_flags: Error handling flags
- * @intr_mask: Interrupt Mask Bits
- * @ee_ctrl_mask: Exception event control mask
- * @is_powered: flag to check if HBA is powered
- * @shutting_down: flag to check if shutdown has been invoked
- * @host_sem: semaphore used to serialize concurrent contexts
- * @eh_wq: Workqueue that eh_work works on
- * @eh_work: Worker to handle UFS errors that require s/w attention
- * @eeh_work: Worker to handle exception events
- * @errors: HBA errors
- * @uic_error: UFS interconnect layer error status
- * @saved_err: sticky error mask
- * @saved_uic_err: sticky UIC error mask
- * @force_reset: flag to force eh_work perform a full reset
- * @force_pmc: flag to force a power mode change
- * @silence_err_logs: flag to silence error logs
- * @dev_cmd: ufs device management command information
- * @last_dme_cmd_tstamp: time stamp of the last completed DME command
- * @auto_bkops_enabled: to track whether bkops is enabled in device
- * @vreg_info: UFS device voltage regulator information
- * @clk_list_head: UFS host controller clocks list node head
- * @pwr_info: holds current power mode
- * @max_pwr_info: keeps the device max valid power mode
- * @desc_size: descriptor sizes reported by device
- * @urgent_bkops_lvl: keeps track of urgent bkops level for device
- * @is_urgent_bkops_lvl_checked: tracks whether the urgent bkops level for the
- * device is known
- * @scsi_block_reqs_cnt: reference counting for scsi block requests
- * @crypto_capabilities: Content of crypto capabilities register (0x100)
- * @crypto_cap_array: Array of crypto capabilities
- * @crypto_cfg_register: Start of the crypto cfg array
- * @crypto_profile: the crypto profile of this hba (if applicable)
- */
-struct ufs_hba {
- void __iomem *mmio_base;
-
- /* Virtual memory reference */
- struct utp_transfer_cmd_desc *ucdl_base_addr;
- struct utp_transfer_req_desc *utrdl_base_addr;
- struct utp_task_req_desc *utmrdl_base_addr;
-
- /* DMA memory reference */
- dma_addr_t ucdl_dma_addr;
- dma_addr_t utrdl_dma_addr;
- dma_addr_t utmrdl_dma_addr;
-
- struct Scsi_Host *host;
- struct device *dev;
- struct request_queue *cmd_queue;
- /*
- * This field is to keep a reference to "scsi_device" corresponding to
- * "UFS device" W-LU.
- */
- struct scsi_device *sdev_ufs_device;
- struct scsi_device *sdev_rpmb;
-
-#ifdef CONFIG_SCSI_UFS_HWMON
- struct device *hwmon_device;
-#endif
-
- enum ufs_dev_pwr_mode curr_dev_pwr_mode;
- enum uic_link_state uic_link_state;
- /* Desired UFS power management level during runtime PM */
- enum ufs_pm_level rpm_lvl;
- /* Desired UFS power management level during system PM */
- enum ufs_pm_level spm_lvl;
- struct device_attribute rpm_lvl_attr;
- struct device_attribute spm_lvl_attr;
- int pm_op_in_progress;
-
- /* Auto-Hibernate Idle Timer register value */
- u32 ahit;
-
- struct ufshcd_lrb *lrb;
-
- unsigned long outstanding_tasks;
- spinlock_t outstanding_lock;
- unsigned long outstanding_reqs;
-
- u32 capabilities;
- int nutrs;
- int nutmrs;
- u32 ufs_version;
- const struct ufs_hba_variant_ops *vops;
- struct ufs_hba_variant_params *vps;
- void *priv;
- unsigned int irq;
- bool is_irq_enabled;
- enum ufs_ref_clk_freq dev_ref_clk_freq;
-
- unsigned int quirks; /* Deviations from standard UFSHCI spec. */
-
- /* Device deviations from standard UFS device spec. */
- unsigned int dev_quirks;
-
- struct blk_mq_tag_set tmf_tag_set;
- struct request_queue *tmf_queue;
- struct request **tmf_rqs;
-
- struct uic_command *active_uic_cmd;
- struct mutex uic_cmd_mutex;
- struct completion *uic_async_done;
-
- enum ufshcd_state ufshcd_state;
- u32 eh_flags;
- u32 intr_mask;
- u16 ee_ctrl_mask; /* Exception event mask */
- u16 ee_drv_mask; /* Exception event mask for driver */
- u16 ee_usr_mask; /* Exception event mask for user (via debugfs) */
- struct mutex ee_ctrl_mutex;
- bool is_powered;
- bool shutting_down;
- struct semaphore host_sem;
-
- /* Work Queues */
- struct workqueue_struct *eh_wq;
- struct work_struct eh_work;
- struct work_struct eeh_work;
-
- /* HBA Errors */
- u32 errors;
- u32 uic_error;
- u32 saved_err;
- u32 saved_uic_err;
- struct ufs_stats ufs_stats;
- bool force_reset;
- bool force_pmc;
- bool silence_err_logs;
-
- /* Device management request data */
- struct ufs_dev_cmd dev_cmd;
- ktime_t last_dme_cmd_tstamp;
- int nop_out_timeout;
-
- /* Keeps information of the UFS device connected to this host */
- struct ufs_dev_info dev_info;
- bool auto_bkops_enabled;
- struct ufs_vreg_info vreg_info;
- struct list_head clk_list_head;
-
-	/* Number of request aborts */
- int req_abort_count;
-
- /* Number of lanes available (1 or 2) for Rx/Tx */
- u32 lanes_per_direction;
- struct ufs_pa_layer_attr pwr_info;
- struct ufs_pwr_mode_info max_pwr_info;
-
- struct ufs_clk_gating clk_gating;
- /* Control to enable/disable host capabilities */
- u32 caps;
-
- struct devfreq *devfreq;
- struct ufs_clk_scaling clk_scaling;
- bool is_sys_suspended;
-
- enum bkops_status urgent_bkops_lvl;
- bool is_urgent_bkops_lvl_checked;
-
- struct rw_semaphore clk_scaling_lock;
- unsigned char desc_size[QUERY_DESC_IDN_MAX];
- atomic_t scsi_block_reqs_cnt;
-
- struct device bsg_dev;
- struct request_queue *bsg_queue;
- struct delayed_work rpm_dev_flush_recheck_work;
-
-#ifdef CONFIG_SCSI_UFS_HPB
- struct ufshpb_dev_info ufshpb_dev;
-#endif
-
- struct ufs_hba_monitor monitor;
-
-#ifdef CONFIG_SCSI_UFS_CRYPTO
- union ufs_crypto_capabilities crypto_capabilities;
- union ufs_crypto_cap_entry *crypto_cap_array;
- u32 crypto_cfg_register;
- struct blk_crypto_profile crypto_profile;
-#endif
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_root;
- struct delayed_work debugfs_ee_work;
- u32 debugfs_ee_rate_limit_ms;
-#endif
- u32 luns_avail;
- bool complete_put;
-};
-
-/* Returns true if clocks can be gated. Otherwise false */
-static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_CLK_GATING;
-}
-static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
-}
-static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_CLK_SCALING;
-}
-static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
-}
-static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND;
-}
-
-static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
-{
- return (hba->caps & UFSHCD_CAP_INTR_AGGR) &&
- !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR);
-}
-
-static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
-{
- return !!(ufshcd_is_link_hibern8(hba) &&
- (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
-}
-
-static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
-{
- return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
- !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8);
-}
-
-static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
-{
- return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
-}
-
-static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_WB_EN;
-}
-
-static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
-{
- return !hba->shutting_down;
-}
-
-#define ufshcd_writel(hba, val, reg) \
- writel((val), (hba)->mmio_base + (reg))
-#define ufshcd_readl(hba, reg) \
- readl((hba)->mmio_base + (reg))
-
-/**
- * ufshcd_rmwl - read, modify, then write a register
- * @hba: per adapter instance
- * @mask: mask to apply on the read value
- * @val: actual value to write
- * @reg: register address
- */
-static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
-{
- u32 tmp;
-
- tmp = ufshcd_readl(hba, reg);
- tmp &= ~mask;
- tmp |= (val & mask);
- ufshcd_writel(hba, tmp, reg);
-}
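A usage sketch (the register and field macros are the ones defined in ufshci.h, also removed by this patch; the threshold value 8 is illustrative): update only the counter-threshold field of the interrupt aggregation control register, leaving the other bits intact.

	/* Illustrative: rewrite just the counter-threshold bits of UTRIACR. */
	ufshcd_rmwl(hba, INT_AGGR_COUNTER_THRESHOLD_MASK,
		    INT_AGGR_COUNTER_THLD_VAL(8),
		    REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);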
-
-int ufshcd_alloc_host(struct device *, struct ufs_hba **);
-void ufshcd_dealloc_host(struct ufs_hba *);
-int ufshcd_hba_enable(struct ufs_hba *hba);
-int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
-int ufshcd_link_recovery(struct ufs_hba *hba);
-int ufshcd_make_hba_operational(struct ufs_hba *hba);
-void ufshcd_remove(struct ufs_hba *);
-int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
-int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
-void ufshcd_delay_us(unsigned long us, unsigned long tolerance);
-int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us,
- unsigned long timeout_ms);
-void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
-void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
-void ufshcd_hba_stop(struct ufs_hba *hba);
-void ufshcd_schedule_eh_work(struct ufs_hba *hba);
-
-static inline void check_upiu_size(void)
-{
- BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
- GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
-}
-
-/**
- * ufshcd_set_variant - set variant specific data to the hba
- * @hba: per adapter instance
- * @variant: pointer to variant specific data
- */
-static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
-{
- BUG_ON(!hba);
- hba->priv = variant;
-}
-
-/**
- * ufshcd_get_variant - get variant specific data from the hba
- * @hba: per adapter instance
- */
-static inline void *ufshcd_get_variant(struct ufs_hba *hba)
-{
- BUG_ON(!hba);
- return hba->priv;
-}
-static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
- struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
-}
-
-static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
-{
- if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
- return hba->dev_info.wb_dedicated_lu;
- return 0;
-}
-
-#ifdef CONFIG_SCSI_UFS_HWMON
-void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask);
-void ufs_hwmon_remove(struct ufs_hba *hba);
-void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask);
-#else
-static inline void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask) {}
-static inline void ufs_hwmon_remove(struct ufs_hba *hba) {}
-static inline void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask) {}
-#endif
-
-#ifdef CONFIG_PM
-extern int ufshcd_runtime_suspend(struct device *dev);
-extern int ufshcd_runtime_resume(struct device *dev);
-#endif
-#ifdef CONFIG_PM_SLEEP
-extern int ufshcd_system_suspend(struct device *dev);
-extern int ufshcd_system_resume(struct device *dev);
-#endif
-extern int ufshcd_shutdown(struct ufs_hba *hba);
-extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
- int agreed_gear,
- int adapt_val);
-extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
- u8 attr_set, u32 mib_val, u8 peer);
-extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
- u32 *mib_val, u8 peer);
-extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode);
-
-/* UIC command interfaces for DME primitives */
-#define DME_LOCAL 0
-#define DME_PEER 1
-#define ATTR_SET_NOR 0 /* NORMAL */
-#define ATTR_SET_ST 1 /* STATIC */
-
-static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
- mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
- mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
- mib_val, DME_PEER);
-}
-
-static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
- u32 mib_val)
-{
- return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
- mib_val, DME_PEER);
-}
-
-static inline int ufshcd_dme_get(struct ufs_hba *hba,
- u32 attr_sel, u32 *mib_val)
-{
- return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
-}
-
-static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
- u32 attr_sel, u32 *mib_val)
-{
- return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
-}
-
-static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
-{
- return (pwr_info->pwr_rx == FAST_MODE ||
- pwr_info->pwr_rx == FASTAUTO_MODE) &&
- (pwr_info->pwr_tx == FAST_MODE ||
- pwr_info->pwr_tx == FASTAUTO_MODE);
-}
-
-static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
-{
- return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
-}
-
-/* Expose Query-Request API */
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
- enum query_opcode opcode,
- enum desc_idn idn, u8 index,
- u8 selector,
- u8 *desc_buf, int *buf_len);
-int ufshcd_read_desc_param(struct ufs_hba *hba,
- enum desc_idn desc_id,
- int desc_index,
- u8 param_offset,
- u8 *param_read_buf,
- u8 param_size);
-int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector,
- u32 *attr_val);
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
- enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
-int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
- enum flag_idn idn, u8 index, bool *flag_res);
-
-void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
-void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, struct ufs_dev_fix *fixups);
-#define SD_ASCII_STD true
-#define SD_RAW false
-int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
- u8 **buf, bool ascii);
-
-int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
-
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
-
-int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
-
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op);
-
-int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
-int ufshcd_suspend_prepare(struct device *dev);
-void ufshcd_resume_complete(struct device *dev);
-
-/* Wrapper functions for safely calling variant operations */
-static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
-{
- if (hba->vops)
- return hba->vops->name;
- return "";
-}
-
-static inline int ufshcd_vops_init(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->init)
- return hba->vops->init(hba);
-
- return 0;
-}
-
-static inline void ufshcd_vops_exit(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->exit)
- return hba->vops->exit(hba);
-}
-
-static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->get_ufs_hci_version)
- return hba->vops->get_ufs_hci_version(hba);
-
- return ufshcd_readl(hba, REG_UFS_VERSION);
-}
-
-static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
- bool up, enum ufs_notify_change_status status)
-{
- if (hba->vops && hba->vops->clk_scale_notify)
- return hba->vops->clk_scale_notify(hba, up, status);
- return 0;
-}
-
-static inline void ufshcd_vops_event_notify(struct ufs_hba *hba,
- enum ufs_event_type evt,
- void *data)
-{
- if (hba->vops && hba->vops->event_notify)
- hba->vops->event_notify(hba, evt, data);
-}
-
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
- enum ufs_notify_change_status status)
-{
- if (hba->vops && hba->vops->setup_clocks)
- return hba->vops->setup_clocks(hba, on, status);
- return 0;
-}
-
-static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
- bool status)
-{
- if (hba->vops && hba->vops->hce_enable_notify)
- return hba->vops->hce_enable_notify(hba, status);
-
- return 0;
-}
-static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
- bool status)
-{
- if (hba->vops && hba->vops->link_startup_notify)
- return hba->vops->link_startup_notify(hba, status);
-
- return 0;
-}
-
-static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->phy_initialization)
- return hba->vops->phy_initialization(hba);
-
- return 0;
-}
-
-static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
-{
- if (hba->vops && hba->vops->pwr_change_notify)
- return hba->vops->pwr_change_notify(hba, status,
- dev_max_params, dev_req_params);
-
- return -ENOTSUPP;
-}
-
-static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
- int tag, u8 tm_function)
-{
- if (hba->vops && hba->vops->setup_task_mgmt)
- return hba->vops->setup_task_mgmt(hba, tag, tm_function);
-}
-
-static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
- enum uic_cmd_dme cmd,
- enum ufs_notify_change_status status)
-{
- if (hba->vops && hba->vops->hibern8_notify)
- return hba->vops->hibern8_notify(hba, cmd, status);
-}
-
-static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->apply_dev_quirks)
- return hba->vops->apply_dev_quirks(hba);
- return 0;
-}
-
-static inline void ufshcd_vops_fixup_dev_quirks(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->fixup_dev_quirks)
- hba->vops->fixup_dev_quirks(hba);
-}
-
-static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op,
- enum ufs_notify_change_status status)
-{
- if (hba->vops && hba->vops->suspend)
- return hba->vops->suspend(hba, op, status);
-
- return 0;
-}
-
-static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
-{
- if (hba->vops && hba->vops->resume)
- return hba->vops->resume(hba, op);
-
- return 0;
-}
-
-static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->dbg_register_dump)
- hba->vops->dbg_register_dump(hba);
-}
-
-static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
-{
- if (hba->vops && hba->vops->device_reset)
- return hba->vops->device_reset(hba);
-
- return -EOPNOTSUPP;
-}
-
-static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
- struct devfreq_dev_profile
- *profile, void *data)
-{
- if (hba->vops && hba->vops->config_scaling_param)
- hba->vops->config_scaling_param(hba, profile, data);
-}
-
-extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
-
-/*
- * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
- * @scsi_lun: scsi LUN id
- *
- * Returns UPIU LUN id
- */
-static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
-{
- if (scsi_is_wlun(scsi_lun))
- return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
- | UFS_UPIU_WLUN_ID;
- else
- return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
-}
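A worked example may help here (constants per the UFS spec as used elsewhere in this driver; treat the concrete numbers as illustrative):

	/*
	 * The UFS Device W-LU (UPIU LUN 0xD0) is exposed to SCSI as the
	 * well-known LUN 0xC150; scsi_is_wlun() matches it, so the result
	 * is (0xC150 & 0x7F) | UFS_UPIU_WLUN_ID = 0x50 | 0x80 = 0xD0.
	 * An ordinary LUN such as 2 passes through unchanged.
	 */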
-
-int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
- const char *prefix);
-
-int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
-int ufshcd_write_ee_control(struct ufs_hba *hba);
-int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask,
- u16 set, u16 clr);
-
-static inline int ufshcd_update_ee_drv_mask(struct ufs_hba *hba,
- u16 set, u16 clr)
-{
- return ufshcd_update_ee_control(hba, &hba->ee_drv_mask,
- &hba->ee_usr_mask, set, clr);
-}
-
-static inline int ufshcd_update_ee_usr_mask(struct ufs_hba *hba,
- u16 set, u16 clr)
-{
- return ufshcd_update_ee_control(hba, &hba->ee_usr_mask,
- &hba->ee_drv_mask, set, clr);
-}
-
-static inline int ufshcd_rpm_get_sync(struct ufs_hba *hba)
-{
- return pm_runtime_get_sync(&hba->sdev_ufs_device->sdev_gendev);
-}
-
-static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
-{
- return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
-}
-
-static inline int ufshcd_rpm_put(struct ufs_hba *hba)
-{
- return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
-}
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci-dwc.h b/drivers/scsi/ufs/ufshci-dwc.h
deleted file mode 100644
index 6c290e272106..000000000000
--- a/drivers/scsi/ufs/ufshci-dwc.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * UFS Host driver for Synopsys Designware Core
- *
- * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
- *
- * Authors: Joao Pinto <jpinto@synopsys.com>
- */
-
-#ifndef _UFSHCI_DWC_H
-#define _UFSHCI_DWC_H
-
-/* DWC HC UFSHCI specific Registers */
-enum dwc_specific_registers {
- DWC_UFS_REG_HCLKDIV = 0xFC,
-};
-
-/* Clock Divider Values: Hex equivalent of frequency in MHz */
-enum clk_div_values {
- DWC_UFS_REG_HCLKDIV_DIV_62_5 = 0x3e,
- DWC_UFS_REG_HCLKDIV_DIV_125 = 0x7d,
- DWC_UFS_REG_HCLKDIV_DIV_200 = 0xc8,
-};
-
-/* Selector Index */
-enum selector_index {
- SELIND_LN0_TX = 0x00,
- SELIND_LN1_TX = 0x01,
- SELIND_LN0_RX = 0x04,
- SELIND_LN1_RX = 0x05,
-};
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
deleted file mode 100644
index 6a295c88d850..000000000000
--- a/drivers/scsi/ufs/ufshci.h
+++ /dev/null
@@ -1,507 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Universal Flash Storage Host controller driver
- * Copyright (C) 2011-2013 Samsung India Software Operations
- *
- * Authors:
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
- */
-
-#ifndef _UFSHCI_H
-#define _UFSHCI_H
-
-enum {
- TASK_REQ_UPIU_SIZE_DWORDS = 8,
- TASK_RSP_UPIU_SIZE_DWORDS = 8,
- ALIGNED_UPIU_SIZE = 512,
-};
-
-/* UFSHCI Registers */
-enum {
- REG_CONTROLLER_CAPABILITIES = 0x00,
- REG_UFS_VERSION = 0x08,
- REG_CONTROLLER_DEV_ID = 0x10,
- REG_CONTROLLER_PROD_ID = 0x14,
- REG_AUTO_HIBERNATE_IDLE_TIMER = 0x18,
- REG_INTERRUPT_STATUS = 0x20,
- REG_INTERRUPT_ENABLE = 0x24,
- REG_CONTROLLER_STATUS = 0x30,
- REG_CONTROLLER_ENABLE = 0x34,
- REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER = 0x38,
- REG_UIC_ERROR_CODE_DATA_LINK_LAYER = 0x3C,
- REG_UIC_ERROR_CODE_NETWORK_LAYER = 0x40,
- REG_UIC_ERROR_CODE_TRANSPORT_LAYER = 0x44,
- REG_UIC_ERROR_CODE_DME = 0x48,
- REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL = 0x4C,
- REG_UTP_TRANSFER_REQ_LIST_BASE_L = 0x50,
- REG_UTP_TRANSFER_REQ_LIST_BASE_H = 0x54,
- REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58,
- REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C,
- REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60,
- REG_UTP_TASK_REQ_LIST_BASE_L = 0x70,
- REG_UTP_TASK_REQ_LIST_BASE_H = 0x74,
- REG_UTP_TASK_REQ_DOOR_BELL = 0x78,
- REG_UTP_TASK_REQ_LIST_CLEAR = 0x7C,
- REG_UTP_TASK_REQ_LIST_RUN_STOP = 0x80,
- REG_UIC_COMMAND = 0x90,
- REG_UIC_COMMAND_ARG_1 = 0x94,
- REG_UIC_COMMAND_ARG_2 = 0x98,
- REG_UIC_COMMAND_ARG_3 = 0x9C,
-
- UFSHCI_REG_SPACE_SIZE = 0xA0,
-
- REG_UFS_CCAP = 0x100,
- REG_UFS_CRYPTOCAP = 0x104,
-
- UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
-};
-
-/* Controller capability masks */
-enum {
- MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
- MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
- MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,
- MASK_64_ADDRESSING_SUPPORT = 0x01000000,
- MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
- MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
- MASK_CRYPTO_SUPPORT = 0x10000000,
-};
-
-#define UFS_MASK(mask, offset) ((mask) << (offset))
-
-/* UFS Version 08h */
-#define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0)
-#define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16)
-
-/*
- * Controller UFSHCI version
- * - 2.x and newer use the following scheme:
- * major << 8 + minor << 4
- * - 1.x has been converted to match this in
- * ufshcd_get_ufs_version()
- */
-static inline u32 ufshci_version(u32 major, u32 minor)
-{
- return (major << 8) + (minor << 4);
-}
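For example:

	/*
	 * ufshci_version(3, 0) == (3 << 8) + (0 << 4) == 0x0300, matching a
	 * UFSHCI 3.0 VER register; a 1.1 controller is normalized by
	 * ufshcd_get_ufs_version() to ufshci_version(1, 1) == 0x0110.
	 */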
-
-/*
- * HCDDID - Host Controller Identification Descriptor
- * - Device ID and Device Class 10h
- */
-#define DEVICE_CLASS UFS_MASK(0xFFFF, 0)
-#define DEVICE_ID UFS_MASK(0xFF, 24)
-
-/*
- * HCPMID - Host Controller Identification Descriptor
- * - Product/Manufacturer ID 14h
- */
-#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
-#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
-
-/* AHIT - Auto-Hibernate Idle Timer */
-#define UFSHCI_AHIBERN8_TIMER_MASK GENMASK(9, 0)
-#define UFSHCI_AHIBERN8_SCALE_MASK GENMASK(12, 10)
-#define UFSHCI_AHIBERN8_SCALE_FACTOR 10
-#define UFSHCI_AHIBERN8_MAX (1023 * 100000)
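A minimal sketch of composing an AHIT value with these masks (assumes <linux/bitfield.h>; per UFSHCI, scale step n corresponds to 10^n microsecond units, so timer 150 at scale 3 is roughly 150 ms, the driver's usual default):

	/* Sketch: request a ~150 ms auto-hibernate idle period. */
	u32 ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
		   FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufshcd_auto_hibern8_update(hba, ahit);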
-
-/*
- * IS - Interrupt Status - 20h
- */
-#define UTP_TRANSFER_REQ_COMPL 0x1
-#define UIC_DME_END_PT_RESET 0x2
-#define UIC_ERROR 0x4
-#define UIC_TEST_MODE 0x8
-#define UIC_POWER_MODE 0x10
-#define UIC_HIBERNATE_EXIT 0x20
-#define UIC_HIBERNATE_ENTER 0x40
-#define UIC_LINK_LOST 0x80
-#define UIC_LINK_STARTUP 0x100
-#define UTP_TASK_REQ_COMPL 0x200
-#define UIC_COMMAND_COMPL 0x400
-#define DEVICE_FATAL_ERROR 0x800
-#define CONTROLLER_FATAL_ERROR 0x10000
-#define SYSTEM_BUS_FATAL_ERROR 0x20000
-#define CRYPTO_ENGINE_FATAL_ERROR 0x40000
-
-#define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\
- UIC_HIBERNATE_EXIT)
-
-#define UFSHCD_UIC_PWR_MASK (UFSHCD_UIC_HIBERN8_MASK |\
- UIC_POWER_MODE)
-
-#define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
-
-#define UFSHCD_ERROR_MASK (UIC_ERROR |\
- DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR)
-
-#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
- CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR)
-
-/* HCS - Host Controller Status 30h */
-#define DEVICE_PRESENT 0x1
-#define UTP_TRANSFER_REQ_LIST_READY 0x2
-#define UTP_TASK_REQ_LIST_READY 0x4
-#define UIC_COMMAND_READY 0x8
-#define HOST_ERROR_INDICATOR 0x10
-#define DEVICE_ERROR_INDICATOR 0x20
-#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
-
-#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
- UTP_TASK_REQ_LIST_READY |\
- UIC_COMMAND_READY)
-
-enum {
- PWR_OK = 0x0,
- PWR_LOCAL = 0x01,
- PWR_REMOTE = 0x02,
- PWR_BUSY = 0x03,
- PWR_ERROR_CAP = 0x04,
- PWR_FATAL_ERROR = 0x05,
-};
-
-/* HCE - Host Controller Enable 34h */
-#define CONTROLLER_ENABLE 0x1
-#define CONTROLLER_DISABLE 0x0
-#define CRYPTO_GENERAL_ENABLE 0x2
-
-/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
-#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
-#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
-#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
-#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR 0x10
-
-/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
-#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
-#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
-#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
-#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
-#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
-#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
-#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
-#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
-#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
-
-/* UECN - Host UIC Error Code Network Layer 40h */
-#define UIC_NETWORK_LAYER_ERROR 0x80000000
-#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
-#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
-#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
-#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
-
-/* UECT - Host UIC Error Code Transport Layer 44h */
-#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
-#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
-#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
-#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
-#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
-#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
-#define UIC_TRANSPORT_BAD_TC 0x10
-#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
-#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
-
-/* UECDME - Host UIC Error Code DME 48h */
-#define UIC_DME_ERROR 0x80000000
-#define UIC_DME_ERROR_CODE_MASK 0x1
-
-/* UTRIACR - Interrupt Aggregation control register - 0x4Ch */
-#define INT_AGGR_TIMEOUT_VAL_MASK 0xFF
-#define INT_AGGR_COUNTER_THRESHOLD_MASK UFS_MASK(0x1F, 8)
-#define INT_AGGR_COUNTER_AND_TIMER_RESET 0x10000
-#define INT_AGGR_STATUS_BIT 0x100000
-#define INT_AGGR_PARAM_WRITE 0x1000000
-#define INT_AGGR_ENABLE 0x80000000
-
-/* UTRLRSR - UTP Transfer Request Run-Stop Register 60h */
-#define UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT 0x1
-
-/* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */
-#define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1
-
-/* UICCMD - UIC Command */
-#define COMMAND_OPCODE_MASK 0xFF
-#define GEN_SELECTOR_INDEX_MASK 0xFFFF
-
-#define MIB_ATTRIBUTE_MASK UFS_MASK(0xFFFF, 16)
-#define RESET_LEVEL 0xFF
-
-#define ATTR_SET_TYPE_MASK UFS_MASK(0xFF, 16)
-#define CONFIG_RESULT_CODE_MASK 0xFF
-#define GENERIC_ERROR_CODE_MASK 0xFF
-
-/* GenSelectorIndex calculation macros for M-PHY attributes */
-#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
-#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
-
-#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
- ((sel) & 0xFFFF))
-#define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0)
-#define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16)
-#define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF)
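A short sketch of how these macros compose UIC command argument 1 (0x00A1 below is a placeholder M-PHY attribute ID, not a real define; UIC_ARG_MPHY_RX_GEN_SEL_INDEX() folds in PA_MAXDATALANES from unipro.h). The attribute lands in bits 31:16, the GenSelectorIndex in bits 15:0.

	/* Sketch: read a peer M-PHY attribute on RX lane 1. */
	u32 val;
	int err = ufshcd_dme_peer_get(hba,
			UIC_ARG_MIB_SEL(0x00A1,
					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(1)),
			&val);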
-
-/* Link Status */
-enum link_status {
- UFSHCD_LINK_IS_DOWN = 1,
- UFSHCD_LINK_IS_UP = 2,
-};
-
-/* UIC Commands */
-enum uic_cmd_dme {
- UIC_CMD_DME_GET = 0x01,
- UIC_CMD_DME_SET = 0x02,
- UIC_CMD_DME_PEER_GET = 0x03,
- UIC_CMD_DME_PEER_SET = 0x04,
- UIC_CMD_DME_POWERON = 0x10,
- UIC_CMD_DME_POWEROFF = 0x11,
- UIC_CMD_DME_ENABLE = 0x12,
- UIC_CMD_DME_RESET = 0x14,
- UIC_CMD_DME_END_PT_RST = 0x15,
- UIC_CMD_DME_LINK_STARTUP = 0x16,
- UIC_CMD_DME_HIBER_ENTER = 0x17,
- UIC_CMD_DME_HIBER_EXIT = 0x18,
- UIC_CMD_DME_TEST_MODE = 0x1A,
-};
-
-/* UIC Config result code / Generic error code */
-enum {
- UIC_CMD_RESULT_SUCCESS = 0x00,
- UIC_CMD_RESULT_INVALID_ATTR = 0x01,
- UIC_CMD_RESULT_FAILURE = 0x01,
- UIC_CMD_RESULT_INVALID_ATTR_VALUE = 0x02,
- UIC_CMD_RESULT_READ_ONLY_ATTR = 0x03,
- UIC_CMD_RESULT_WRITE_ONLY_ATTR = 0x04,
- UIC_CMD_RESULT_BAD_INDEX = 0x05,
- UIC_CMD_RESULT_LOCKED_ATTR = 0x06,
- UIC_CMD_RESULT_BAD_TEST_FEATURE_INDEX = 0x07,
- UIC_CMD_RESULT_PEER_COMM_FAILURE = 0x08,
- UIC_CMD_RESULT_BUSY = 0x09,
- UIC_CMD_RESULT_DME_FAILURE = 0x0A,
-};
-
-#define MASK_UIC_COMMAND_RESULT 0xFF
-
-#define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8)
-#define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0)
-
-/* Interrupt disable masks */
-enum {
- /* Interrupt disable mask for UFSHCI v1.0 */
- INTERRUPT_MASK_ALL_VER_10 = 0x30FFF,
- INTERRUPT_MASK_RW_VER_10 = 0x30000,
-
- /* Interrupt disable mask for UFSHCI v1.1 */
- INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
-
- /* Interrupt disable mask for UFSHCI v2.1 */
- INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
-};
-
-/* CCAP - Crypto Capability 100h */
-union ufs_crypto_capabilities {
- __le32 reg_val;
- struct {
- u8 num_crypto_cap;
- u8 config_count;
- u8 reserved;
- u8 config_array_ptr;
- };
-};
-
-enum ufs_crypto_key_size {
- UFS_CRYPTO_KEY_SIZE_INVALID = 0x0,
- UFS_CRYPTO_KEY_SIZE_128 = 0x1,
- UFS_CRYPTO_KEY_SIZE_192 = 0x2,
- UFS_CRYPTO_KEY_SIZE_256 = 0x3,
- UFS_CRYPTO_KEY_SIZE_512 = 0x4,
-};
-
-enum ufs_crypto_alg {
- UFS_CRYPTO_ALG_AES_XTS = 0x0,
- UFS_CRYPTO_ALG_BITLOCKER_AES_CBC = 0x1,
- UFS_CRYPTO_ALG_AES_ECB = 0x2,
- UFS_CRYPTO_ALG_ESSIV_AES_CBC = 0x3,
-};
-
-/* x-CRYPTOCAP - Crypto Capability X */
-union ufs_crypto_cap_entry {
- __le32 reg_val;
- struct {
- u8 algorithm_id;
- u8 sdus_mask; /* Supported data unit size mask */
- u8 key_size;
- u8 reserved;
- };
-};
-
-#define UFS_CRYPTO_CONFIGURATION_ENABLE (1 << 7)
-#define UFS_CRYPTO_KEY_MAX_SIZE 64
-/* x-CRYPTOCFG - Crypto Configuration X */
-union ufs_crypto_cfg_entry {
- __le32 reg_val[32];
- struct {
- u8 crypto_key[UFS_CRYPTO_KEY_MAX_SIZE];
- u8 data_unit_size;
- u8 crypto_cap_idx;
- u8 reserved_1;
- u8 config_enable;
- u8 reserved_multi_host;
- u8 reserved_2;
- u8 vsb[2];
- u8 reserved_3[56];
- };
-};
-
-/*
- * Request Descriptor Definitions
- */
-
-/* Transfer request command type */
-enum {
- UTP_CMD_TYPE_SCSI = 0x0,
- UTP_CMD_TYPE_UFS = 0x1,
- UTP_CMD_TYPE_DEV_MANAGE = 0x2,
-};
-
-/* To accommodate UFS2.0 required Command type */
-enum {
- UTP_CMD_TYPE_UFS_STORAGE = 0x1,
-};
-
-enum {
- UTP_SCSI_COMMAND = 0x00000000,
- UTP_NATIVE_UFS_COMMAND = 0x10000000,
- UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
- UTP_REQ_DESC_INT_CMD = 0x01000000,
- UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
-};
-
-/* UTP Transfer Request Data Direction (DD) */
-enum {
- UTP_NO_DATA_TRANSFER = 0x00000000,
- UTP_HOST_TO_DEVICE = 0x02000000,
- UTP_DEVICE_TO_HOST = 0x04000000,
-};
-
-/* Overall command status values */
-enum utp_ocs {
- OCS_SUCCESS = 0x0,
- OCS_INVALID_CMD_TABLE_ATTR = 0x1,
- OCS_INVALID_PRDT_ATTR = 0x2,
- OCS_MISMATCH_DATA_BUF_SIZE = 0x3,
- OCS_MISMATCH_RESP_UPIU_SIZE = 0x4,
- OCS_PEER_COMM_FAILURE = 0x5,
- OCS_ABORTED = 0x6,
- OCS_FATAL_ERROR = 0x7,
- OCS_DEVICE_FATAL_ERROR = 0x8,
- OCS_INVALID_CRYPTO_CONFIG = 0x9,
- OCS_GENERAL_CRYPTO_ERROR = 0xA,
- OCS_INVALID_COMMAND_STATUS = 0x0F,
-};
-
-enum {
- MASK_OCS = 0x0F,
-};
-
-/* The maximum data byte count of a single PRDT entry is 256KB */
-#define PRDT_DATA_BYTE_COUNT_MAX	(256 * 1024)
-/* The data byte count in a PRDT entry must be a multiple of 4 (dword aligned) */
-#define PRDT_DATA_BYTE_COUNT_PAD	4
-
-/**
- * struct ufshcd_sg_entry - UFSHCI PRD Entry
- * @addr: Physical address; DW-0 and DW-1.
- * @reserved: Reserved for future use DW-2
- * @size: size of physical segment DW-3
- */
-struct ufshcd_sg_entry {
- __le64 addr;
- __le32 reserved;
- __le32 size;
-};
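A minimal sketch of filling one PRD entry from a DMA-mapped scatterlist segment (fill_prd() is a hypothetical helper; the size field stores the byte count minus one, which is how the UFSHCI data byte count field is encoded):

	/* Hypothetical helper: populate one UFSHCI PRD entry. */
	static void fill_prd(struct ufshcd_sg_entry *prd, struct scatterlist *sg)
	{
		/* addr spans DW-0/DW-1 as a 64-bit little-endian value */
		prd->addr = cpu_to_le64(sg_dma_address(sg));
		prd->reserved = 0;
		/* the data byte count field stores (length - 1) */
		prd->size = cpu_to_le32(sg_dma_len(sg) - 1);
	}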
-
-/**
- * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD)
- * @command_upiu: Command UPIU Frame address
- * @response_upiu: Response UPIU Frame address
- * @prd_table: Physical Region Descriptor
- */
-struct utp_transfer_cmd_desc {
- u8 command_upiu[ALIGNED_UPIU_SIZE];
- u8 response_upiu[ALIGNED_UPIU_SIZE];
- struct ufshcd_sg_entry prd_table[SG_ALL];
-};
-
-/**
- * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
- * @dword_0: Descriptor Header DW0
- * @dword_1: Descriptor Header DW1
- * @dword_2: Descriptor Header DW2
- * @dword_3: Descriptor Header DW3
- */
-struct request_desc_header {
- __le32 dword_0;
- __le32 dword_1;
- __le32 dword_2;
- __le32 dword_3;
-};
-
-/**
- * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
- * @header: UTRD header DW-0 to DW-3
- * @command_desc_base_addr_lo: UCD base address low DW-4
- * @command_desc_base_addr_hi: UCD base address high DW-5
- * @response_upiu_length: response UPIU length DW-6
- * @response_upiu_offset: response UPIU offset DW-6
- * @prd_table_length: Physical region descriptor length DW-7
- * @prd_table_offset: Physical region descriptor offset DW-7
- */
-struct utp_transfer_req_desc {
-
- /* DW 0-3 */
- struct request_desc_header header;
-
-	/* DW 4-5 */
- __le32 command_desc_base_addr_lo;
- __le32 command_desc_base_addr_hi;
-
- /* DW 6 */
- __le16 response_upiu_length;
- __le16 response_upiu_offset;
-
- /* DW 7 */
- __le16 prd_table_length;
- __le16 prd_table_offset;
-};
-
-/*
- * UTMRD structure.
- */
-struct utp_task_req_desc {
- /* DW 0-3 */
- struct request_desc_header header;
-
- /* DW 4-11 - Task request UPIU structure */
- struct {
- struct utp_upiu_header req_header;
- __be32 input_param1;
- __be32 input_param2;
- __be32 input_param3;
- __be32 __reserved1[2];
- } upiu_req;
-
- /* DW 12-19 - Task Management Response UPIU structure */
- struct {
- struct utp_upiu_header rsp_header;
- __be32 output_param1;
- __be32 output_param2;
- __be32 __reserved2[3];
- } upiu_rsp;
-};
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
deleted file mode 100644
index ded5ba9b1466..000000000000
--- a/drivers/scsi/ufs/ufshpb.c
+++ /dev/null
@@ -1,2653 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Universal Flash Storage Host Performance Booster
- *
- * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * Yongmyung Lee <ymhungry.lee@samsung.com>
- * Jinyoung Choi <j-young.choi@samsung.com>
- */
-
-#include <asm/unaligned.h>
-#include <linux/async.h>
-
-#include "ufshcd.h"
-#include "ufshpb.h"
-#include "../sd.h"
-
-#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
-#define READ_TO_MS 1000
-#define READ_TO_EXPIRIES 100
-#define POLLING_INTERVAL_MS 200
-#define THROTTLE_MAP_REQ_DEFAULT 1
-
-/* memory management */
-static struct kmem_cache *ufshpb_mctx_cache;
-static mempool_t *ufshpb_mctx_pool;
-static mempool_t *ufshpb_page_pool;
-/* A 2MB cache can hold the PPN entries covering a 1GB address range. */
-static unsigned int ufshpb_host_map_kbytes = 2048;
-static int tot_active_srgn_pages;
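For reference, the arithmetic behind the 2MB/1GB comment above (entry and block sizes as defined in ufshpb.h):

	/*
	 * Each L2P entry is HPB_ENTRY_SIZE (8) bytes and maps one
	 * HPB_ENTRY_BLOCK_SIZE (4 KiB) logical block, so
	 * 2 MiB / 8 B = 256 Ki entries, and 256 Ki * 4 KiB = 1 GiB
	 * of addressable range.
	 */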
-
-static struct workqueue_struct *ufshpb_wq;
-
-static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx);
-
-bool ufshpb_is_allowed(struct ufs_hba *hba)
-{
- return !(hba->ufshpb_dev.hpb_disabled);
-}
-
-/* HPB version 1.0 is referred to as the legacy version. */
-bool ufshpb_is_legacy(struct ufs_hba *hba)
-{
- return hba->ufshpb_dev.is_legacy;
-}
-
-static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
-{
- return sdev->hostdata;
-}
-
-static int ufshpb_get_state(struct ufshpb_lu *hpb)
-{
- return atomic_read(&hpb->hpb_state);
-}
-
-static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
-{
- atomic_set(&hpb->hpb_state, state);
-}
-
-static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- return rgn->rgn_state != HPB_RGN_INACTIVE &&
- srgn->srgn_state == HPB_SRGN_VALID;
-}
-
-static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
-{
- return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
-}
-
-static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
-{
- return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
- op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
-}
-
-static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
-{
- return transfer_len <= hpb->pre_req_max_tr_len;
-}
-
-static bool ufshpb_is_general_lun(int lun)
-{
- return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
-}
-
-static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
-{
- if (hpb->lu_pinned_end != PINNED_NOT_SET &&
- rgn_idx >= hpb->lu_pinned_start &&
- rgn_idx <= hpb->lu_pinned_end)
- return true;
-
- return false;
-}
-
-static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
-{
- bool ret = false;
- unsigned long flags;
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT)
- return;
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
- ret = true;
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- if (ret)
- queue_work(ufshpb_wq, &hpb->map_work);
-}
-
-static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp,
- struct utp_hpb_rsp *rsp_field)
-{
- /* Check HPB_UPDATE_ALERT */
- if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
- UPIU_HEADER_DWORD(0, 2, 0, 0)))
- return false;
-
- if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
- rsp_field->desc_type != DEV_DES_TYPE ||
- rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
- rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
- rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
- rsp_field->hpb_op == HPB_RSP_NONE ||
- (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
- !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
- return false;
-
- if (!ufshpb_is_general_lun(rsp_field->lun)) {
- dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
- lrbp->lun);
- return false;
- }
-
- return true;
-}
-
-static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
- int srgn_offset, int cnt, bool set_dirty)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn, *prev_srgn = NULL;
- int set_bit_len;
- int bitmap_len;
- unsigned long flags;
-
-next_srgn:
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- if (likely(!srgn->is_last))
- bitmap_len = hpb->entries_per_srgn;
- else
- bitmap_len = hpb->last_srgn_entries;
-
- if ((srgn_offset + cnt) > bitmap_len)
- set_bit_len = bitmap_len - srgn_offset;
- else
- set_bit_len = cnt;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- if (set_dirty) {
- if (srgn->srgn_state == HPB_SRGN_VALID)
- bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
- set_bit_len);
- } else if (hpb->is_hcm) {
- /* rewind the read timer for lru regions */
- rgn->read_timeout = ktime_add_ms(ktime_get(),
- rgn->hpb->params.read_timeout_ms);
- rgn->read_timeout_expiries =
- rgn->hpb->params.read_timeout_expiries;
- }
- }
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- if (hpb->is_hcm && prev_srgn != srgn) {
- bool activate = false;
-
- spin_lock(&rgn->rgn_lock);
- if (set_dirty) {
- rgn->reads -= srgn->reads;
- srgn->reads = 0;
- set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
- } else {
- srgn->reads++;
- rgn->reads++;
- if (srgn->reads == hpb->params.activation_thld)
- activate = true;
- }
- spin_unlock(&rgn->rgn_lock);
-
- if (activate ||
- test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "activate region %d-%d\n", rgn_idx, srgn_idx);
- }
-
- prev_srgn = srgn;
- }
-
- srgn_offset = 0;
- if (++srgn_idx == hpb->srgns_per_rgn) {
- srgn_idx = 0;
- rgn_idx++;
- }
-
- cnt -= set_bit_len;
- if (cnt > 0)
- goto next_srgn;
-}
-
-static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx, int srgn_offset, int cnt)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int bitmap_len;
- int bit_len;
-
-next_srgn:
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- if (likely(!srgn->is_last))
- bitmap_len = hpb->entries_per_srgn;
- else
- bitmap_len = hpb->last_srgn_entries;
-
- if (!ufshpb_is_valid_srgn(rgn, srgn))
- return true;
-
-	/*
-	 * If the region state is active, mctx must be allocated.
-	 * In this case, check whether the region was evicted or
-	 * the mctx allocation failed.
-	 */
- if (unlikely(!srgn->mctx)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- return true;
- }
-
- if ((srgn_offset + cnt) > bitmap_len)
- bit_len = bitmap_len - srgn_offset;
- else
- bit_len = cnt;
-
- if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
- srgn_offset) < bit_len + srgn_offset)
- return true;
-
- srgn_offset = 0;
- if (++srgn_idx == hpb->srgns_per_rgn) {
- srgn_idx = 0;
- rgn_idx++;
- }
-
- cnt -= bit_len;
- if (cnt > 0)
- goto next_srgn;
-
- return false;
-}
-
-static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
-{
- return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
-}
-
-static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
- struct ufshpb_map_ctx *mctx, int pos,
- int len, __be64 *ppn_buf)
-{
- struct page *page;
- int index, offset;
- int copied;
-
- index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
- offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
-
- if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
- copied = len;
- else
- copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
-
- page = mctx->m_page[index];
- if (unlikely(!page)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "error. cannot find page in mctx\n");
- return -ENOMEM;
- }
-
- memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
- copied * HPB_ENTRY_SIZE);
-
- return copied;
-}
-
-static void
-ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
- int *srgn_idx, int *offset)
-{
- int rgn_offset;
-
- *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
- rgn_offset = lpn & hpb->entries_per_rgn_mask;
- *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
- *offset = rgn_offset & hpb->entries_per_srgn_mask;
-}
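A worked decomposition under an assumed geometry (region and subregion sizes are device-reported, so the shift and mask values below are illustrative: 16384 entries per region, shift 14; 1024 entries per subregion, shift 10):

	/*
	 * lpn = 0x12345:
	 *   rgn_idx    = 0x12345 >> 14    = 4
	 *   rgn_offset = 0x12345 & 0x3fff = 0x2345
	 *   srgn_idx   = 0x2345 >> 10     = 8
	 *   offset     = 0x2345 & 0x3ff   = 0x345
	 */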
-
-static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- __be64 ppn, u8 transfer_len)
-{
- unsigned char *cdb = lrbp->cmd->cmnd;
- __be64 ppn_tmp = ppn;
- cdb[0] = UFSHPB_READ;
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
- ppn_tmp = (__force __be64)swab64((__force u64)ppn);
-
- /* ppn value is stored as big-endian in the host memory */
- memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
- cdb[14] = transfer_len;
- cdb[15] = 0;
-
- lrbp->cmd->cmd_len = UFS_CDB_SIZE;
-}
-
-/*
- * This function sets up an HPB READ command using host-side L2P map data.
- */
-int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufshpb_lu *hpb;
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- struct scsi_cmnd *cmd = lrbp->cmd;
- u32 lpn;
- __be64 ppn;
- unsigned long flags;
- int transfer_len, rgn_idx, srgn_idx, srgn_offset;
- int err = 0;
-
- hpb = ufshpb_get_hpb_data(cmd->device);
- if (!hpb)
- return -ENODEV;
-
- if (ufshpb_get_state(hpb) == HPB_INIT)
- return -ENODEV;
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT", __func__);
- return -ENODEV;
- }
-
- if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
- (!ufshpb_is_write_or_discard(cmd) &&
- !ufshpb_is_read_cmd(cmd)))
- return 0;
-
- transfer_len = sectors_to_logical(cmd->device,
- blk_rq_sectors(scsi_cmd_to_rq(cmd)));
- if (unlikely(!transfer_len))
- return 0;
-
- lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
- ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
-	/* If the command is a WRITE or DISCARD, mark the bitmap as dirty */
- if (ufshpb_is_write_or_discard(cmd)) {
- ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len, true);
- return 0;
- }
-
- if (!ufshpb_is_supported_chunk(hpb, transfer_len))
- return 0;
-
- if (hpb->is_hcm) {
- /*
- * in host control mode, reads are the main source for
- * activation trials.
- */
- ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len, false);
-
- /* keep those counters normalized */
- if (rgn->reads > hpb->entries_per_srgn)
- schedule_work(&hpb->ufshpb_normalization_work);
- }
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len)) {
- hpb->stats.miss_cnt++;
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return 0;
- }
-
- err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- if (unlikely(err < 0)) {
-		/*
-		 * In this case the region state is active, but the ppn
-		 * table was never allocated. A ppn table must always be
-		 * allocated while a region is in the active state.
-		 */
- dev_err(hba->dev, "get ppn failed. err %d\n", err);
- return err;
- }
-
- ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
-
- hpb->stats.hit_cnt++;
- return 0;
-}
-
-static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
- int rgn_idx, enum req_opf dir,
- bool atomic)
-{
- struct ufshpb_req *rq;
- struct request *req;
- int retries = HPB_MAP_REQ_RETRIES;
-
- rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
- if (!rq)
- return NULL;
-
-retry:
- req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
- BLK_MQ_REQ_NOWAIT);
-
- if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
- usleep_range(3000, 3100);
- goto retry;
- }
-
- if (IS_ERR(req))
- goto free_rq;
-
- rq->hpb = hpb;
- rq->req = req;
- rq->rb.rgn_idx = rgn_idx;
-
- return rq;
-
-free_rq:
- kmem_cache_free(hpb->map_req_cache, rq);
- return NULL;
-}
-
-static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
-{
- blk_mq_free_request(rq->req);
- kmem_cache_free(hpb->map_req_cache, rq);
-}
-
-static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_req *map_req;
- struct bio *bio;
- unsigned long flags;
-
- if (hpb->is_hcm &&
- hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
- dev_info(&hpb->sdev_ufs_lu->sdev_dev,
- "map_req throttle. inflight %d throttle %d",
- hpb->num_inflight_map_req,
- hpb->params.inflight_map_req);
- return NULL;
- }
-
- map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
- if (!map_req)
- return NULL;
-
- bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
- if (!bio) {
- ufshpb_put_req(hpb, map_req);
- return NULL;
- }
-
- map_req->bio = bio;
-
- map_req->rb.srgn_idx = srgn->srgn_idx;
- map_req->rb.mctx = srgn->mctx;
-
- spin_lock_irqsave(&hpb->param_lock, flags);
- hpb->num_inflight_map_req++;
- spin_unlock_irqrestore(&hpb->param_lock, flags);
-
- return map_req;
-}
-
-static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *map_req)
-{
- unsigned long flags;
-
- bio_put(map_req->bio);
- ufshpb_put_req(hpb, map_req);
-
- spin_lock_irqsave(&hpb->param_lock, flags);
- hpb->num_inflight_map_req--;
- spin_unlock_irqrestore(&hpb->param_lock, flags);
-}
-
-static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_region *rgn;
- u32 num_entries = hpb->entries_per_srgn;
-
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- return -1;
- }
-
- if (unlikely(srgn->is_last))
- num_entries = hpb->last_srgn_entries;
-
- bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
- clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
-
- return 0;
-}
-
-static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
-
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- list_del_init(&rgn->list_inact_rgn);
-
- if (list_empty(&srgn->list_act_srgn))
- list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
-
- hpb->stats.rb_active_cnt++;
-}
-
-static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- rgn = hpb->rgn_tbl + rgn_idx;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- list_del_init(&srgn->list_act_srgn);
-
- if (list_empty(&rgn->list_inact_rgn))
- list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
-
- hpb->stats.rb_inactive_cnt++;
-}
-
-static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_region *rgn;
-
-	/*
-	 * If the subregion has no mctx after the I/O for HPB_READ_BUFFER
-	 * has completed, the region to which the subregion belongs was
-	 * evicted. A region must not be evicted while its I/O is in
-	 * progress.
-	 */
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- srgn->srgn_state = HPB_SRGN_INVALID;
- return;
- }
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
-
- if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "region %d subregion %d evicted\n",
- srgn->rgn_idx, srgn->srgn_idx);
- srgn->srgn_state = HPB_SRGN_INVALID;
- return;
- }
- srgn->srgn_state = HPB_SRGN_VALID;
-}
-
-static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
-{
- struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
-
- ufshpb_put_req(umap_req->hpb, umap_req);
-}
-
-static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
-{
- struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
- struct ufshpb_lu *hpb = map_req->hpb;
- struct ufshpb_subregion *srgn;
- unsigned long flags;
-
- srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
- map_req->rb.srgn_idx;
-
- ufshpb_clear_dirty_bitmap(hpb, srgn);
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- ufshpb_activate_subregion(hpb, srgn);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- ufshpb_put_map_req(map_req->hpb, map_req);
-}
-
-static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
-{
- cdb[0] = UFSHPB_WRITE_BUFFER;
- cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
- UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
- if (rgn)
- put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
- cdb[9] = 0x00;
-}
-
-static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
- int srgn_idx, int srgn_mem_size)
-{
- cdb[0] = UFSHPB_READ_BUFFER;
- cdb[1] = UFSHPB_READ_BUFFER_ID;
-
- put_unaligned_be16(rgn_idx, &cdb[2]);
- put_unaligned_be16(srgn_idx, &cdb[4]);
- put_unaligned_be24(srgn_mem_size, &cdb[6]);
-
- cdb[9] = 0x00;
-}
-
-static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *umap_req,
- struct ufshpb_region *rgn)
-{
- struct request *req;
- struct scsi_request *rq;
-
- req = umap_req->req;
- req->timeout = 0;
- req->end_io_data = (void *)umap_req;
- rq = scsi_req(req);
- ufshpb_set_unmap_cmd(rq->cmd, rgn);
- rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
-
- blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
-
- hpb->stats.umap_req_cnt++;
-}
-
-static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *map_req, bool last)
-{
- struct request_queue *q;
- struct request *req;
- struct scsi_request *rq;
- int mem_size = hpb->srgn_mem_size;
- int ret = 0;
- int i;
-
- q = hpb->sdev_ufs_lu->request_queue;
- for (i = 0; i < hpb->pages_per_srgn; i++) {
- ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
- PAGE_SIZE, 0);
- if (ret != PAGE_SIZE) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "bio_add_pc_page fail %d - %d\n",
- map_req->rb.rgn_idx, map_req->rb.srgn_idx);
- return ret;
- }
- }
-
- req = map_req->req;
-
- blk_rq_append_bio(req, map_req->bio);
-
- req->end_io_data = map_req;
-
- rq = scsi_req(req);
-
- if (unlikely(last))
- mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
-
- ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
- map_req->rb.srgn_idx, mem_size);
- rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
-
- blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
-
- hpb->stats.map_req_cnt++;
- return 0;
-}
-
-static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
- bool last)
-{
- struct ufshpb_map_ctx *mctx;
- u32 num_entries = hpb->entries_per_srgn;
- int i, j;
-
- mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
- if (!mctx)
- return NULL;
-
- mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
- if (!mctx->m_page)
- goto release_mctx;
-
- if (unlikely(last))
- num_entries = hpb->last_srgn_entries;
-
- mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
- if (!mctx->ppn_dirty)
- goto release_m_page;
-
- for (i = 0; i < hpb->pages_per_srgn; i++) {
- mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
- if (!mctx->m_page[i]) {
- for (j = 0; j < i; j++)
- mempool_free(mctx->m_page[j], ufshpb_page_pool);
- goto release_ppn_dirty;
- }
- clear_page(page_address(mctx->m_page[i]));
- }
-
- return mctx;
-
-release_ppn_dirty:
- bitmap_free(mctx->ppn_dirty);
-release_m_page:
- kmem_cache_free(hpb->m_page_cache, mctx->m_page);
-release_mctx:
- mempool_free(mctx, ufshpb_mctx_pool);
- return NULL;
-}
-
-static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
- struct ufshpb_map_ctx *mctx)
-{
- int i;
-
- for (i = 0; i < hpb->pages_per_srgn; i++)
- mempool_free(mctx->m_page[i], ufshpb_page_pool);
-
- bitmap_free(mctx->ppn_dirty);
- kmem_cache_free(hpb->m_page_cache, mctx->m_page);
- mempool_free(mctx, ufshpb_mctx_pool);
-}
-
-static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (srgn->srgn_state == HPB_SRGN_ISSUED)
- return -EPERM;
-
- return 0;
-}
-
-static void ufshpb_read_to_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
- ufshpb_read_to_work.work);
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn, *next_rgn;
- unsigned long flags;
- unsigned int poll;
- LIST_HEAD(expired_list);
-
- if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
- return;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
- list_lru_rgn) {
- bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
-
- if (timedout) {
- rgn->read_timeout_expiries--;
- if (is_rgn_dirty(rgn) ||
- rgn->read_timeout_expiries == 0)
- list_add(&rgn->list_expired_rgn, &expired_list);
- else
- rgn->read_timeout = ktime_add_ms(ktime_get(),
- hpb->params.read_timeout_ms);
- }
- }
-
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry_safe(rgn, next_rgn, &expired_list,
- list_expired_rgn) {
- list_del_init(&rgn->list_expired_rgn);
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- }
-
- ufshpb_kick_map_work(hpb);
-
- clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
-
- poll = hpb->params.timeout_polling_interval_ms;
- schedule_delayed_work(&hpb->ufshpb_read_to_work,
- msecs_to_jiffies(poll));
-}
-
-static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- rgn->rgn_state = HPB_RGN_ACTIVE;
- list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
- atomic_inc(&lru_info->active_cnt);
- if (rgn->hpb->is_hcm) {
- rgn->read_timeout =
- ktime_add_ms(ktime_get(),
- rgn->hpb->params.read_timeout_ms);
- rgn->read_timeout_expiries =
- rgn->hpb->params.read_timeout_expiries;
- }
-}
-
-static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
-}
-
-static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
-{
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn, *victim_rgn = NULL;
-
- list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
- if (!rgn) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: no region allocated\n",
- __func__);
- return NULL;
- }
- if (ufshpb_check_srgns_issue_state(hpb, rgn))
- continue;
-
-		/*
-		 * In host control mode, verify that the region being
-		 * evicted has fewer reads than the exit threshold.
-		 */
- if (hpb->is_hcm &&
- rgn->reads > hpb->params.eviction_thld_exit)
- continue;
-
- victim_rgn = rgn;
- break;
- }
-
- return victim_rgn;
-}
-
-static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- list_del_init(&rgn->list_lru_rgn);
- rgn->rgn_state = HPB_RGN_INACTIVE;
- atomic_dec(&lru_info->active_cnt);
-}
-
-static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- if (srgn->srgn_state != HPB_SRGN_UNUSED) {
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- srgn->srgn_state = HPB_SRGN_UNUSED;
- srgn->mctx = NULL;
- }
-}
-
-static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- bool atomic)
-{
- struct ufshpb_req *umap_req;
- int rgn_idx = rgn ? rgn->rgn_idx : 0;
-
- umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
- if (!umap_req)
- return -ENOMEM;
-
- ufshpb_execute_umap_req(hpb, umap_req, rgn);
-
- return 0;
-}
-
-static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- return ufshpb_issue_umap_req(hpb, rgn, true);
-}
-
-static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
-{
- return ufshpb_issue_umap_req(hpb, NULL, false);
-}
-
-static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct victim_select_info *lru_info;
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- lru_info = &hpb->lru_info;
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
-
- ufshpb_cleanup_lru_info(lru_info, rgn);
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- ufshpb_purge_active_subregion(hpb, srgn);
-}
-
-static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (rgn->rgn_state == HPB_RGN_PINNED) {
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "pinned region cannot drop-out. region %d\n",
- rgn->rgn_idx);
- goto out;
- }
-
- if (!list_empty(&rgn->list_lru_rgn)) {
- if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (hpb->is_hcm) {
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- ret = ufshpb_issue_umap_single_req(hpb, rgn);
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (ret)
- goto out;
- }
-
- __ufshpb_evict_region(hpb, rgn);
- }
-out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return ret;
-}
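
Note the lock juggling above: rgn_state_lock is a spinlock, so it must be dropped around ufshpb_issue_umap_single_req(), which can sleep, and re-taken afterwards. A condensed sketch of that drop/reacquire shape (obj and issue_blocking_request are illustrative, not driver symbols):

#include <linux/spinlock.h>

struct obj {
	spinlock_t lock;
	int state;
};

int issue_blocking_request(struct obj *o);	/* may sleep; assumed defined elsewhere */

static int evict_obj(struct obj *o)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&o->lock, flags);
	if (o->state == 1 /* evictable */) {
		/* never sleep under a spinlock: drop it for the request */
		spin_unlock_irqrestore(&o->lock, flags);
		ret = issue_blocking_request(o);
		spin_lock_irqsave(&o->lock, flags);
		/* state may have changed while unlocked; a robust caller
		 * re-validates it here before proceeding */
		if (ret)
			goto out;
	}
out:
	spin_unlock_irqrestore(&o->lock, flags);
	return ret;
}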
-
-static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_req *map_req;
- unsigned long flags;
- int ret;
- int err = -EAGAIN;
- bool alloc_required = false;
- enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT\n", __func__);
- goto unlock_out;
- }
-
- if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
- (srgn->srgn_state == HPB_SRGN_INVALID)) {
- err = 0;
- goto unlock_out;
- }
-
- if (srgn->srgn_state == HPB_SRGN_UNUSED)
- alloc_required = true;
-
-	/*
-	 * If the subregion is already in ISSUED state, a device-side event
-	 * (e.g. GC or wear-leveling) occurred and an HPB response requesting
-	 * a map load was received while a previous HPB_READ_BUFFER was still
-	 * in flight. In that case, once the current HPB_READ_BUFFER
-	 * completes, the next HPB_READ_BUFFER is issued again to fetch the
-	 * latest map data.
-	 */
- if (srgn->srgn_state == HPB_SRGN_ISSUED)
- goto unlock_out;
-
- srgn->srgn_state = HPB_SRGN_ISSUED;
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- if (alloc_required) {
- srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "get map_ctx failed. region %d - %d\n",
- rgn->rgn_idx, srgn->srgn_idx);
- state = HPB_SRGN_UNUSED;
- goto change_srgn_state;
- }
- }
-
- map_req = ufshpb_get_map_req(hpb, srgn);
- if (!map_req)
- goto change_srgn_state;
-
- ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
- if (ret) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: issue map_req failed: %d, region %d - %d\n",
- __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
- goto free_map_req;
- }
- return 0;
-
-free_map_req:
- ufshpb_put_map_req(hpb, map_req);
-change_srgn_state:
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- srgn->srgn_state = state;
-unlock_out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return err;
-}
-
-static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
-{
- struct ufshpb_region *victim_rgn = NULL;
- struct victim_select_info *lru_info = &hpb->lru_info;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-	/*
-	 * If the region is already on the lru_list, just move it to the
-	 * most-recently-used end of the list; it is already in the
-	 * active state.
-	 */
- if (!list_empty(&rgn->list_lru_rgn)) {
- ufshpb_hit_lru_info(lru_info, rgn);
- goto out;
- }
-
- if (rgn->rgn_state == HPB_RGN_INACTIVE) {
- if (atomic_read(&lru_info->active_cnt) ==
- lru_info->max_lru_active_cnt) {
-			/*
-			 * If the maximum number of active regions is
-			 * exceeded, evict the least recently used region.
-			 * This can happen when the device responds to the
-			 * eviction information late. Evicting the LRU
-			 * region is safe because the device can detect it
-			 * by the absence of HPB_READ for that region.
-			 *
-			 * In host control mode, also verify that the
-			 * entering region has enough reads.
-			 */
- if (hpb->is_hcm &&
- rgn->reads < hpb->params.eviction_thld_enter) {
- ret = -EACCES;
- goto out;
- }
-
- victim_rgn = ufshpb_victim_lru_info(hpb);
- if (!victim_rgn) {
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
-					 "cannot get victim region%s\n",
-					 hpb->is_hcm ? "" : " - error");
- ret = -ENOMEM;
- goto out;
- }
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "LRU full (%d), choose victim %d\n",
- atomic_read(&lru_info->active_cnt),
- victim_rgn->rgn_idx);
-
- if (hpb->is_hcm) {
- spin_unlock_irqrestore(&hpb->rgn_state_lock,
- flags);
- ret = ufshpb_issue_umap_single_req(hpb,
- victim_rgn);
- spin_lock_irqsave(&hpb->rgn_state_lock,
- flags);
- if (ret)
- goto out;
- }
-
- __ufshpb_evict_region(hpb, victim_rgn);
- }
-
-		/*
-		 * When a region is added to the lru_info list_head, all of
-		 * its subregions are guaranteed to have been assigned an
-		 * mctx. If that fails, try to acquire the mctx again
-		 * without adding the region to the lru_info list_head.
-		 */
- ufshpb_add_lru_info(lru_info, rgn);
- }
-out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return ret;
-}
-
-static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
- struct utp_hpb_rsp *rsp_field)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int i, rgn_i, srgn_i;
-
- BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
-	/*
-	 * If the same region is reported as both active and inactive, it
-	 * is inactivated. The device can detect this (the region was
-	 * inactivated) and will respond with the proper active region
-	 * information.
-	 */
- for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
- rgn_i =
- be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
- srgn_i =
- be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
-
- rgn = hpb->rgn_tbl + rgn_i;
- if (hpb->is_hcm &&
- (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
-			/*
-			 * In host control mode, subregion activation
-			 * recommendations are only honoured for active
-			 * regions. Recommendations for dirty regions are
-			 * also ignored - the host makes decisions about
-			 * those by itself.
-			 */
- continue;
- }
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
-
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_active_info(hpb, rgn_i, srgn_i);
- spin_unlock(&hpb->rsp_list_lock);
-
- srgn = rgn->srgn_tbl + srgn_i;
-
- /* blocking HPB_READ */
- spin_lock(&hpb->rgn_state_lock);
- if (srgn->srgn_state == HPB_SRGN_VALID)
- srgn->srgn_state = HPB_SRGN_INVALID;
- spin_unlock(&hpb->rgn_state_lock);
- }
-
- if (hpb->is_hcm) {
- /*
- * in host control mode the device is not allowed to inactivate
- * regions
- */
- goto out;
- }
-
- for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
- rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "inactivate(%d) region %d\n", i, rgn_i);
-
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_inactive_info(hpb, rgn_i);
- spin_unlock(&hpb->rsp_list_lock);
-
- rgn = hpb->rgn_tbl + rgn_i;
-
- spin_lock(&hpb->rgn_state_lock);
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
- srgn = rgn->srgn_tbl + srgn_i;
- if (srgn->srgn_state == HPB_SRGN_VALID)
- srgn->srgn_state = HPB_SRGN_INVALID;
- }
- }
- spin_unlock(&hpb->rgn_state_lock);
-	}
-
-out:
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
- rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
-
- if (ufshpb_get_state(hpb) == HPB_PRESENT)
- queue_work(ufshpb_wq, &hpb->map_work);
-}
-
-static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
-{
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn;
- unsigned long flags;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
- set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
-
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-}
-
-/*
- * This function parses the recommended active subregion information from
- * the sense data field of a response UPIU carrying SAM_STAT_GOOD status.
- */
-void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
- struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
- int data_seg_len;
-
- if (unlikely(lrbp->lun != rsp_field->lun)) {
- struct scsi_device *sdev;
- bool found = false;
-
- __shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
-
- if (!hpb)
- continue;
-
- if (rsp_field->lun == hpb->lun) {
- found = true;
- break;
- }
- }
-
- if (!found)
- return;
- }
-
- if (!hpb)
- return;
-
- if (ufshpb_get_state(hpb) == HPB_INIT)
- return;
-
- if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
- (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT/SUSPEND\n",
- __func__);
- return;
- }
-
- data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
- & MASK_RSP_UPIU_DATA_SEG_LEN;
-
-	/* To flush the remaining rsp_list, queue the map_work task */
- if (!data_seg_len) {
- if (!ufshpb_is_general_lun(hpb->lun))
- return;
-
- ufshpb_kick_map_work(hpb);
- return;
- }
-
- BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
-
- if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
- return;
-
- hpb->stats.rb_noti_cnt++;
-
- switch (rsp_field->hpb_op) {
- case HPB_RSP_REQ_REGION_UPDATE:
- if (data_seg_len != DEV_DATA_SEG_LEN)
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
-				 "%s: data segment length mismatch.\n",
- __func__);
- ufshpb_rsp_req_region_update(hpb, rsp_field);
- break;
- case HPB_RSP_DEV_RESET:
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "UFS device lost HPB information during PM.\n");
-
- if (hpb->is_hcm) {
- struct scsi_device *sdev;
-
- __shost_for_each_device(sdev, hba->host) {
- struct ufshpb_lu *h = sdev->hostdata;
-
- if (h)
- ufshpb_dev_reset_handler(h);
- }
- }
-
- break;
- default:
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "hpb_op is not available: %d\n",
- rsp_field->hpb_op);
- break;
- }
-}
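
The data segment length is carried in a big-endian header dword and masked with MASK_RSP_UPIU_DATA_SEG_LEN. A one-line helper equivalent to the extraction above (rsp_data_seg_len is an illustrative name):

/* dword_2 is big-endian on the wire; the mask selects the length bits */
static u32 rsp_data_seg_len(__be32 dword_2)
{
	return be32_to_cpu(dword_2) & MASK_RSP_UPIU_DATA_SEG_LEN;
}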
-
-static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- if (!list_empty(&rgn->list_inact_rgn))
- return;
-
- if (!list_empty(&srgn->list_act_srgn)) {
- list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
- return;
- }
-
- list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
-}
-
-static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct list_head *pending_list)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- if (!list_empty(&rgn->list_inact_rgn))
- return;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (!list_empty(&srgn->list_act_srgn))
- return;
-
- list_add_tail(&rgn->list_inact_rgn, pending_list);
-}
-
-static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
- struct ufshpb_subregion,
- list_act_srgn))) {
- if (ufshpb_get_state(hpb) == HPB_SUSPEND)
- break;
-
- list_del_init(&srgn->list_act_srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
- ret = ufshpb_add_region(hpb, rgn);
- if (ret)
- goto active_failed;
-
- ret = ufshpb_issue_map_req(hpb, rgn, srgn);
- if (ret) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "issue map_req failed. ret %d, region %d - %d\n",
- ret, rgn->rgn_idx, srgn->srgn_idx);
- goto active_failed;
- }
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- }
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- return;
-
-active_failed:
- dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
- rgn->rgn_idx, srgn->srgn_idx);
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_add_active_list(hpb, rgn, srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn;
- unsigned long flags;
- int ret;
- LIST_HEAD(pending_list);
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
- struct ufshpb_region,
- list_inact_rgn))) {
- if (ufshpb_get_state(hpb) == HPB_SUSPEND)
- break;
-
- list_del_init(&rgn->list_inact_rgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- ret = ufshpb_evict_region(hpb, rgn);
- if (ret) {
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- }
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- }
-
- list_splice(&pending_list, &hpb->lh_inact_rgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_normalization_work_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
- ufshpb_normalization_work);
- int rgn_idx;
- u8 factor = hpb->params.normalization_factor;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
- int srgn_idx;
-
- spin_lock(&rgn->rgn_lock);
- rgn->reads = 0;
- for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
- struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
-
- srgn->reads >>= factor;
- rgn->reads += srgn->reads;
- }
- spin_unlock(&rgn->rgn_lock);
-
- if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
- continue;
-
- /* if region is active but has no reads - inactivate it */
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
- spin_unlock(&hpb->rsp_list_lock);
- }
-}
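
The normalization pass is an exponential decay: each subregion counter is right-shifted by normalization_factor and the region total is rebuilt from the decayed parts. A worked example with normalization_factor = 1 (values are illustrative):

/*
 *   srgn->reads before:  200   31   1   0
 *   srgn->reads after:   100   15   0   0
 *
 * A region whose counters all decay to zero ends up with rgn->reads == 0
 * and, if it is still HPB_RGN_ACTIVE, is queued for inactivation - regions
 * must keep being read to stay active.
 */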
-
-static void ufshpb_map_work_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT\n", __func__);
- return;
- }
-
- ufshpb_run_inactive_region_list(hpb);
- ufshpb_run_active_subregion_list(hpb);
-}
-
-/*
- * This function is called during init, so it does not need to hold any
- * locks (rgn_state_lock, rsp_list_lock, etc.).
- */
- */
-static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
- struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx, i;
- int err = 0;
-
- for_each_sub_region(rgn, srgn_idx, srgn) {
- srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
- srgn->srgn_state = HPB_SRGN_INVALID;
- if (!srgn->mctx) {
- err = -ENOMEM;
- dev_err(hba->dev,
- "alloc mctx for pinned region failed\n");
- goto release;
- }
-
- list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
- }
-
- rgn->rgn_state = HPB_RGN_PINNED;
- return 0;
-
-release:
- for (i = 0; i < srgn_idx; i++) {
- srgn = rgn->srgn_tbl + i;
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- }
- return err;
-}
-
-static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn, bool last)
-{
- int srgn_idx;
- struct ufshpb_subregion *srgn;
-
- for_each_sub_region(rgn, srgn_idx, srgn) {
- INIT_LIST_HEAD(&srgn->list_act_srgn);
-
- srgn->rgn_idx = rgn->rgn_idx;
- srgn->srgn_idx = srgn_idx;
- srgn->srgn_state = HPB_SRGN_UNUSED;
- }
-
- if (unlikely(last && hpb->last_srgn_entries))
- srgn->is_last = true;
-}
-
-static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn, int srgn_cnt)
-{
- rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
- GFP_KERNEL);
- if (!rgn->srgn_tbl)
- return -ENOMEM;
-
- rgn->srgn_cnt = srgn_cnt;
- return 0;
-}
-
-static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
- struct ufshpb_lu *hpb,
- struct ufshpb_dev_info *hpb_dev_info,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- u32 entries_per_rgn;
- u64 rgn_mem_size, tmp;
-
- if (ufshpb_is_legacy(hba))
- hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
- else
- hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
-
- hpb->lu_pinned_start = hpb_lu_info->pinned_start;
- hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
- (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
- : PINNED_NOT_SET;
- hpb->lru_info.max_lru_active_cnt =
- hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
-
- rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
- * HPB_ENTRY_SIZE;
- do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
- hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
- * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
-
- tmp = rgn_mem_size;
- do_div(tmp, HPB_ENTRY_SIZE);
- entries_per_rgn = (u32)tmp;
- hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
- hpb->entries_per_rgn_mask = entries_per_rgn - 1;
-
- hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
- hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
- hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
-
- tmp = rgn_mem_size;
- do_div(tmp, hpb->srgn_mem_size);
- hpb->srgns_per_rgn = (int)tmp;
-
- hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
- entries_per_rgn);
- hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
- (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
- hpb->last_srgn_entries = hpb_lu_info->num_blocks
- % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
-
- hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
-
- if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
- hpb->is_hcm = true;
-}
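
To make the geometry arithmetic concrete, a worked example with illustrative descriptor values rgn_size = 15 and srgn_size = 13 (real devices report their own values):

/*
 *   region coverage    = (1 << 15) * 512 B           = 16 MiB
 *   rgn_mem_size       = 16 MiB / 4 KiB * 8 B        = 32 KiB of L2P map
 *   entries_per_rgn    = 32 KiB / 8 B                = 4096
 *   subregion coverage = (1 << 13) * 512 B           = 4 MiB
 *   srgn_mem_size      = 4 MiB / 4 KiB * 8 B         = 8 KiB
 *   entries_per_srgn   = 8 KiB / 8 B                 = 1024
 *   srgns_per_rgn      = 32 KiB / 8 KiB              = 4
 *   pages_per_srgn     = 8 KiB / PAGE_SIZE (4 KiB)   = 2
 */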
-
-static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn_table, *rgn;
- int rgn_idx, i;
- int ret = 0;
-
- rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
- GFP_KERNEL);
- if (!rgn_table)
- return -ENOMEM;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- int srgn_cnt = hpb->srgns_per_rgn;
- bool last_srgn = false;
-
- rgn = rgn_table + rgn_idx;
- rgn->rgn_idx = rgn_idx;
-
- spin_lock_init(&rgn->rgn_lock);
-
- INIT_LIST_HEAD(&rgn->list_inact_rgn);
- INIT_LIST_HEAD(&rgn->list_lru_rgn);
- INIT_LIST_HEAD(&rgn->list_expired_rgn);
-
- if (rgn_idx == hpb->rgns_per_lu - 1) {
- srgn_cnt = ((hpb->srgns_per_lu - 1) %
- hpb->srgns_per_rgn) + 1;
- last_srgn = true;
- }
-
- ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
- if (ret)
- goto release_srgn_table;
- ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
-
- if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
- ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
- if (ret)
- goto release_srgn_table;
- } else {
- rgn->rgn_state = HPB_RGN_INACTIVE;
- }
-
- rgn->rgn_flags = 0;
- rgn->hpb = hpb;
- }
-
- hpb->rgn_tbl = rgn_table;
-
- return 0;
-
-release_srgn_table:
- for (i = 0; i <= rgn_idx; i++)
- kvfree(rgn_table[i].srgn_tbl);
-
- kvfree(rgn_table);
- return ret;
-}
-
-static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- int srgn_idx;
- struct ufshpb_subregion *srgn;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (srgn->srgn_state != HPB_SRGN_UNUSED) {
- srgn->srgn_state = HPB_SRGN_UNUSED;
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- }
-}
-
-static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
-{
- int rgn_idx;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- struct ufshpb_region *rgn;
-
- rgn = hpb->rgn_tbl + rgn_idx;
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- rgn->rgn_state = HPB_RGN_INACTIVE;
-
- ufshpb_destroy_subregion_tbl(hpb, rgn);
- }
-
- kvfree(rgn->srgn_tbl);
- }
-
- kvfree(hpb->rgn_tbl);
-}
-
-/* SYSFS functions */
-#define ufshpb_sysfs_attr_show_func(__name) \
-static ssize_t __name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
- \
- if (!hpb) \
- return -ENODEV; \
- \
- return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
-} \
-\
-static DEVICE_ATTR_RO(__name)
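
For reference, ufshpb_sysfs_attr_show_func(hit_cnt) expands to essentially the following:

static ssize_t hit_cnt_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);

	if (!hpb)
		return -ENODEV;

	return sysfs_emit(buf, "%llu\n", hpb->stats.hit_cnt);
}

static DEVICE_ATTR_RO(hit_cnt);	/* defines dev_attr_hit_cnt */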
-
-ufshpb_sysfs_attr_show_func(hit_cnt);
-ufshpb_sysfs_attr_show_func(miss_cnt);
-ufshpb_sysfs_attr_show_func(rb_noti_cnt);
-ufshpb_sysfs_attr_show_func(rb_active_cnt);
-ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
-ufshpb_sysfs_attr_show_func(map_req_cnt);
-ufshpb_sysfs_attr_show_func(umap_req_cnt);
-
-static struct attribute *hpb_dev_stat_attrs[] = {
- &dev_attr_hit_cnt.attr,
- &dev_attr_miss_cnt.attr,
- &dev_attr_rb_noti_cnt.attr,
- &dev_attr_rb_active_cnt.attr,
- &dev_attr_rb_inactive_cnt.attr,
- &dev_attr_map_req_cnt.attr,
- &dev_attr_umap_req_cnt.attr,
- NULL,
-};
-
-struct attribute_group ufs_sysfs_hpb_stat_group = {
- .name = "hpb_stats",
- .attrs = hpb_dev_stat_attrs,
-};
-
-/* SYSFS functions - tunable parameters */
-#define ufshpb_sysfs_param_show_func(__name) \
-static ssize_t __name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
- \
- if (!hpb) \
- return -ENODEV; \
- \
- return sysfs_emit(buf, "%d\n", hpb->params.__name); \
-}
-
-ufshpb_sysfs_param_show_func(requeue_timeout_ms);
-static ssize_t
-requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0)
- return -EINVAL;
-
- hpb->params.requeue_timeout_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(requeue_timeout_ms);
-
-ufshpb_sysfs_param_show_func(activation_thld);
-static ssize_t
-activation_thld_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0)
- return -EINVAL;
-
- hpb->params.activation_thld = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(activation_thld);
-
-ufshpb_sysfs_param_show_func(normalization_factor);
-static ssize_t
-normalization_factor_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
- return -EINVAL;
-
- hpb->params.normalization_factor = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(normalization_factor);
-
-ufshpb_sysfs_param_show_func(eviction_thld_enter);
-static ssize_t
-eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= hpb->params.eviction_thld_exit)
- return -EINVAL;
-
- hpb->params.eviction_thld_enter = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(eviction_thld_enter);
-
-ufshpb_sysfs_param_show_func(eviction_thld_exit);
-static ssize_t
-eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= hpb->params.activation_thld)
- return -EINVAL;
-
- hpb->params.eviction_thld_exit = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(eviction_thld_exit);
-
-ufshpb_sysfs_param_show_func(read_timeout_ms);
-static ssize_t
-read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
-	/* read_timeout must be much greater than timeout_polling_interval */
- if (val < hpb->params.timeout_polling_interval_ms * 2)
- return -EINVAL;
-
- hpb->params.read_timeout_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(read_timeout_ms);
-
-ufshpb_sysfs_param_show_func(read_timeout_expiries);
-static ssize_t
-read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0)
- return -EINVAL;
-
- hpb->params.read_timeout_expiries = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(read_timeout_expiries);
-
-ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
-static ssize_t
-timeout_polling_interval_ms_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
-	/* timeout_polling_interval must be much smaller than read_timeout */
- if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
- return -EINVAL;
-
- hpb->params.timeout_polling_interval_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(timeout_polling_interval_ms);
-
-ufshpb_sysfs_param_show_func(inflight_map_req);
-static ssize_t inflight_map_req_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
- return -EINVAL;
-
- hpb->params.inflight_map_req = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(inflight_map_req);
-
-static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
-{
- hpb->params.activation_thld = ACTIVATION_THRESHOLD;
- hpb->params.normalization_factor = 1;
- hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
- hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
- hpb->params.read_timeout_ms = READ_TO_MS;
- hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
- hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
- hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
-}
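
Assuming ACTIVATION_THRESHOLD keeps its upstream default of 8 (the constant is defined earlier in this file; the value is assumed here), the shifted defaults work out as follows:

/*
 *   eviction_thld_enter = 8 << 5 = 256 reads
 *   eviction_thld_exit  = 8 << 4 = 128 reads
 *
 * A candidate region must be read roughly twice as often to enter the
 * active set as an incumbent needs to stay in it, which damps thrashing
 * between two similarly hot regions.
 */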
-
-static struct attribute *hpb_dev_param_attrs[] = {
- &dev_attr_requeue_timeout_ms.attr,
- &dev_attr_activation_thld.attr,
- &dev_attr_normalization_factor.attr,
- &dev_attr_eviction_thld_enter.attr,
- &dev_attr_eviction_thld_exit.attr,
- &dev_attr_read_timeout_ms.attr,
- &dev_attr_read_timeout_expiries.attr,
- &dev_attr_timeout_polling_interval_ms.attr,
- &dev_attr_inflight_map_req.attr,
- NULL,
-};
-
-struct attribute_group ufs_sysfs_hpb_param_group = {
- .name = "hpb_params",
- .attrs = hpb_dev_param_attrs,
-};
-
-static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
-{
- struct ufshpb_req *pre_req = NULL, *t;
- int qd = hpb->sdev_ufs_lu->queue_depth / 2;
- int i;
-
- INIT_LIST_HEAD(&hpb->lh_pre_req_free);
-
- hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
- hpb->throttle_pre_req = qd;
- hpb->num_inflight_pre_req = 0;
-
- if (!hpb->pre_req)
- goto release_mem;
-
- for (i = 0; i < qd; i++) {
- pre_req = hpb->pre_req + i;
- INIT_LIST_HEAD(&pre_req->list_req);
- pre_req->req = NULL;
-
- pre_req->bio = bio_alloc(GFP_KERNEL, 1);
- if (!pre_req->bio)
- goto release_mem;
-
- pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!pre_req->wb.m_page) {
- bio_put(pre_req->bio);
- goto release_mem;
- }
-
- list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
- }
-
- return 0;
-release_mem:
- list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
- list_del_init(&pre_req->list_req);
- bio_put(pre_req->bio);
- __free_page(pre_req->wb.m_page);
- }
-
- kfree(hpb->pre_req);
- return -ENOMEM;
-}
-
-static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
-{
- struct ufshpb_req *pre_req = NULL;
- int i;
-
- for (i = 0; i < hpb->throttle_pre_req; i++) {
- pre_req = hpb->pre_req + i;
- bio_put(hpb->pre_req[i].bio);
-		/* free the page only if it was actually allocated */
-		if (pre_req->wb.m_page)
- __free_page(hpb->pre_req[i].wb.m_page);
- list_del_init(&pre_req->list_req);
- }
-
- kfree(hpb->pre_req);
-}
-
-static void ufshpb_stat_init(struct ufshpb_lu *hpb)
-{
- hpb->stats.hit_cnt = 0;
- hpb->stats.miss_cnt = 0;
- hpb->stats.rb_noti_cnt = 0;
- hpb->stats.rb_active_cnt = 0;
- hpb->stats.rb_inactive_cnt = 0;
- hpb->stats.map_req_cnt = 0;
- hpb->stats.umap_req_cnt = 0;
-}
-
-static void ufshpb_param_init(struct ufshpb_lu *hpb)
-{
- hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
- if (hpb->is_hcm)
- ufshpb_hcm_param_init(hpb);
-}
-
-static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
-{
- int ret;
-
- spin_lock_init(&hpb->rgn_state_lock);
- spin_lock_init(&hpb->rsp_list_lock);
- spin_lock_init(&hpb->param_lock);
-
- INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
- INIT_LIST_HEAD(&hpb->lh_act_srgn);
- INIT_LIST_HEAD(&hpb->lh_inact_rgn);
- INIT_LIST_HEAD(&hpb->list_hpb_lu);
-
- INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
- if (hpb->is_hcm) {
- INIT_WORK(&hpb->ufshpb_normalization_work,
- ufshpb_normalization_work_handler);
- INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
- ufshpb_read_to_handler);
- }
-
- hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
- sizeof(struct ufshpb_req), 0, 0, NULL);
- if (!hpb->map_req_cache) {
- dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
- hpb->lun);
- return -ENOMEM;
- }
-
- hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
- sizeof(struct page *) * hpb->pages_per_srgn,
- 0, 0, NULL);
- if (!hpb->m_page_cache) {
- dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
- hpb->lun);
- ret = -ENOMEM;
- goto release_req_cache;
- }
-
- ret = ufshpb_pre_req_mempool_init(hpb);
- if (ret) {
- dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
- hpb->lun);
- goto release_m_page_cache;
- }
-
- ret = ufshpb_alloc_region_tbl(hba, hpb);
- if (ret)
- goto release_pre_req_mempool;
-
- ufshpb_stat_init(hpb);
- ufshpb_param_init(hpb);
-
- if (hpb->is_hcm) {
- unsigned int poll;
-
- poll = hpb->params.timeout_polling_interval_ms;
- schedule_delayed_work(&hpb->ufshpb_read_to_work,
- msecs_to_jiffies(poll));
- }
-
- return 0;
-
-release_pre_req_mempool:
- ufshpb_pre_req_mempool_destroy(hpb);
-release_m_page_cache:
- kmem_cache_destroy(hpb->m_page_cache);
-release_req_cache:
- kmem_cache_destroy(hpb->map_req_cache);
- return ret;
-}
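
The init path above follows the kernel's goto-unwind idiom: each acquired resource gets a release label, and failures unwind in reverse order of acquisition. Condensed shape (ctx, alloc_a/alloc_b and free_a are illustrative names, not driver symbols):

struct ctx { void *a, *b; };

static int init_two(struct ctx *c)
{
	int ret = -ENOMEM;

	c->a = alloc_a();
	if (!c->a)
		return ret;

	c->b = alloc_b();
	if (!c->b)
		goto release_a;

	return 0;

release_a:
	free_a(c->a);
	return ret;
}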
-
-static struct ufshpb_lu *
-ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
- struct ufshpb_dev_info *hpb_dev_info,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- struct ufshpb_lu *hpb;
- int ret;
-
- hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
- if (!hpb)
- return NULL;
-
- hpb->lun = sdev->lun;
- hpb->sdev_ufs_lu = sdev;
-
- ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
-
- ret = ufshpb_lu_hpb_init(hba, hpb);
- if (ret) {
- dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
- goto release_hpb;
- }
-
- sdev->hostdata = hpb;
- return hpb;
-
-release_hpb:
- kfree(hpb);
- return NULL;
-}
-
-static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn, *next_rgn;
- struct ufshpb_subregion *srgn, *next_srgn;
- unsigned long flags;
-
-	/*
-	 * After a device reset, the remaining HPB region information may
-	 * be stale. Discard the HPB response lists left over from before
-	 * the reset to avoid unnecessary work.
-	 */
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
- list_inact_rgn)
- list_del_init(&rgn->list_inact_rgn);
-
- list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
- list_act_srgn)
- list_del_init(&srgn->list_act_srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
-{
- if (hpb->is_hcm) {
- cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
- cancel_work_sync(&hpb->ufshpb_normalization_work);
- }
- cancel_work_sync(&hpb->map_work);
-}
-
-static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
-{
- int err = 0;
- bool flag_res = true;
- int try;
-
- /* wait for the device to complete HPB reset query */
- for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
- dev_dbg(hba->dev,
-			"%s: polling fHpbReset flag, attempt %d\n",
-			__func__, try);
-
- /* Poll fHpbReset flag to be cleared */
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
-
- if (err) {
- dev_err(hba->dev,
- "%s reading fHpbReset flag failed with error %d\n",
- __func__, err);
- return flag_res;
- }
-
- if (!flag_res)
- goto out;
-
- usleep_range(1000, 1100);
- }
- if (flag_res) {
- dev_err(hba->dev,
- "%s fHpbReset was not cleared by the device\n",
- __func__);
- }
-out:
- return flag_res;
-}
-
-void ufshpb_reset(struct ufs_hba *hba)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if (ufshpb_get_state(hpb) != HPB_RESET)
- continue;
-
- ufshpb_set_state(hpb, HPB_PRESENT);
- }
-}
-
-void ufshpb_reset_host(struct ufs_hba *hba)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT)
- continue;
- ufshpb_set_state(hpb, HPB_RESET);
- ufshpb_cancel_jobs(hpb);
- ufshpb_discard_rsp_lists(hpb);
- }
-}
-
-void ufshpb_suspend(struct ufs_hba *hba)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT)
- continue;
- ufshpb_set_state(hpb, HPB_SUSPEND);
- ufshpb_cancel_jobs(hpb);
- }
-}
-
-void ufshpb_resume(struct ufs_hba *hba)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
- (ufshpb_get_state(hpb) != HPB_SUSPEND))
- continue;
- ufshpb_set_state(hpb, HPB_PRESENT);
- ufshpb_kick_map_work(hpb);
- if (hpb->is_hcm) {
- unsigned int poll =
- hpb->params.timeout_polling_interval_ms;
-
- schedule_delayed_work(&hpb->ufshpb_read_to_work,
- msecs_to_jiffies(poll));
- }
- }
-}
-
-static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- u16 max_active_rgns;
- u8 lu_enable;
- int size;
- int ret;
- char desc_buf[QUERY_DESC_MAX_SIZE];
-
- ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
-
- ufshcd_rpm_get_sync(hba);
- ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- QUERY_DESC_IDN_UNIT, lun, 0,
- desc_buf, &size);
- ufshcd_rpm_put_sync(hba);
-
- if (ret) {
- dev_err(hba->dev,
- "%s: idn: %d lun: %d query request failed",
- __func__, QUERY_DESC_IDN_UNIT, lun);
- return ret;
- }
-
- lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
- if (lu_enable != LU_ENABLED_HPB_FUNC)
- return -ENODEV;
-
- max_active_rgns = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
- if (!max_active_rgns) {
- dev_err(hba->dev,
- "lun %d wrong number of max active regions\n", lun);
- return -ENODEV;
- }
-
- hpb_lu_info->num_blocks = get_unaligned_be64(
- desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
- hpb_lu_info->pinned_start = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
- hpb_lu_info->num_pinned = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
- hpb_lu_info->max_active_rgns = max_active_rgns;
-
- return 0;
-}
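
The unit descriptor is a big-endian byte array, so multi-byte fields are decoded with get_unaligned_be16()/get_unaligned_be64(), which are safe at any byte offset. A tiny helper equivalent to the pattern used above (desc_be16 is an illustrative name):

#include <asm/unaligned.h>

/* decode a 16-bit big-endian descriptor field at byte offset 'off' */
static u16 desc_be16(const u8 *desc_buf, size_t off)
{
	return get_unaligned_be16(desc_buf + off);
}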
-
-void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
-
- if (!hpb)
- return;
-
- ufshpb_set_state(hpb, HPB_FAILED);
-
- sdev = hpb->sdev_ufs_lu;
- sdev->hostdata = NULL;
-
- ufshpb_cancel_jobs(hpb);
-
- ufshpb_pre_req_mempool_destroy(hpb);
- ufshpb_destroy_region_tbl(hpb);
-
- kmem_cache_destroy(hpb->map_req_cache);
- kmem_cache_destroy(hpb->m_page_cache);
-
- list_del_init(&hpb->list_hpb_lu);
-
- kfree(hpb);
-}
-
-static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
-{
- int pool_size;
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
- bool init_success;
-
- if (tot_active_srgn_pages == 0) {
- ufshpb_remove(hba);
- return;
- }
-
- init_success = !ufshpb_check_hpb_reset_query(hba);
-
- pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
- if (pool_size > tot_active_srgn_pages) {
- mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
- mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
- }
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if (init_success) {
- ufshpb_set_state(hpb, HPB_PRESENT);
- if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
- queue_work(ufshpb_wq, &hpb->map_work);
- if (!hpb->is_hcm)
- ufshpb_issue_umap_all_req(hpb);
- } else {
- dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
- ufshpb_destroy_lu(hba, sdev);
- }
- }
-
- if (!init_success)
- ufshpb_remove(hba);
-}
-
-void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- struct ufshpb_lu *hpb;
- int ret;
- struct ufshpb_lu_info hpb_lu_info = { 0 };
- int lun = sdev->lun;
-
- if (lun >= hba->dev_info.max_lu_supported)
- goto out;
-
- ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
- if (ret)
- goto out;
-
- hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
- &hpb_lu_info);
- if (!hpb)
- goto out;
-
- tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
- hpb->srgns_per_rgn * hpb->pages_per_srgn;
-
-out:
- /* All LUs are initialized */
- if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
- ufshpb_hpb_lu_prepared(hba);
-}
-
-static int ufshpb_init_mem_wq(struct ufs_hba *hba)
-{
- int ret;
- unsigned int pool_size;
-
- ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
- sizeof(struct ufshpb_map_ctx),
- 0, 0, NULL);
- if (!ufshpb_mctx_cache) {
- dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
- return -ENOMEM;
- }
-
- pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
- dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
- __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
-
- ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
- ufshpb_mctx_cache);
- if (!ufshpb_mctx_pool) {
- dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
- ret = -ENOMEM;
- goto release_mctx_cache;
- }
-
- ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
- if (!ufshpb_page_pool) {
- dev_err(hba->dev, "ufshpb: cannot init page pool\n");
- ret = -ENOMEM;
- goto release_mctx_pool;
- }
-
- ufshpb_wq = alloc_workqueue("ufshpb-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
- if (!ufshpb_wq) {
- dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
- ret = -ENOMEM;
- goto release_page_pool;
- }
-
- return 0;
-
-release_page_pool:
- mempool_destroy(ufshpb_page_pool);
-release_mctx_pool:
- mempool_destroy(ufshpb_mctx_pool);
-release_mctx_cache:
- kmem_cache_destroy(ufshpb_mctx_cache);
- return ret;
-}
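
Pool sizing is driven by the ufshpb_host_map_kbytes module parameter; assuming its upstream default of 32 * 1024 (i.e. 32 MiB) and 4 KiB pages:

/*
 *   pool_size = PAGE_ALIGN(32768 * 1024) / 4096 = 8192 pages
 *
 * ufshpb_mctx_pool then guarantees a minimum of 8192 map-context objects
 * and ufshpb_page_pool a minimum of 8192 pages, even under memory
 * pressure.
 */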
-
-void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
-{
- struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
- int max_active_rgns = 0;
- int hpb_num_lu;
-
- hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
- if (hpb_num_lu == 0) {
- dev_err(hba->dev, "No HPB LU supported\n");
- hpb_info->hpb_disabled = true;
- return;
- }
-
- hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
- hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
- max_active_rgns = get_unaligned_be16(geo_buf +
- GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
-
- if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
- max_active_rgns == 0) {
- dev_err(hba->dev, "No HPB supported device\n");
- hpb_info->hpb_disabled = true;
- return;
- }
-}
-
-void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
-{
- struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
- int version, ret;
- int max_single_cmd;
-
- hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
-
- version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
- if ((version != HPB_SUPPORT_VERSION) &&
- (version != HPB_SUPPORT_LEGACY_VERSION)) {
- dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
- __func__, version);
- hpb_dev_info->hpb_disabled = true;
- return;
- }
-
- if (version == HPB_SUPPORT_LEGACY_VERSION)
- hpb_dev_info->is_legacy = true;
-
-	/*
-	 * Get the number of user logical units to check whether every
-	 * scsi_device has finished initialization
-	 */
- hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
-
- if (hpb_dev_info->is_legacy)
- return;
-
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
-
- if (ret)
- hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
- else
- hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
-}
-
-void ufshpb_init(struct ufs_hba *hba)
-{
- struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
- int try;
- int ret;
-
- if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
- return;
-
- if (ufshpb_init_mem_wq(hba)) {
- hpb_dev_info->hpb_disabled = true;
- return;
- }
-
- atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
- tot_active_srgn_pages = 0;
- /* issue HPB reset query */
- for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
- ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
- if (!ret)
- break;
- }
-}
-
-void ufshpb_remove(struct ufs_hba *hba)
-{
- mempool_destroy(ufshpb_page_pool);
- mempool_destroy(ufshpb_mctx_pool);
- kmem_cache_destroy(ufshpb_mctx_cache);
-
- destroy_workqueue(ufshpb_wq);
-}
-
-module_param(ufshpb_host_map_kbytes, uint, 0644);
-MODULE_PARM_DESC(ufshpb_host_map_kbytes,
-	"ufshpb host mapping memory in kilobytes for the ufshpb memory pool");
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
deleted file mode 100644
index b475dbd78988..000000000000
--- a/drivers/scsi/ufs/ufshpb.h
+++ /dev/null
@@ -1,320 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Universal Flash Storage Host Performance Booster
- *
- * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * Yongmyung Lee <ymhungry.lee@samsung.com>
- * Jinyoung Choi <j-young.choi@samsung.com>
- */
-
-#ifndef _UFSHPB_H_
-#define _UFSHPB_H_
-
-/* hpb response UPIU macro */
-#define HPB_RSP_NONE 0x0
-#define HPB_RSP_REQ_REGION_UPDATE 0x1
-#define HPB_RSP_DEV_RESET 0x2
-#define MAX_ACTIVE_NUM 2
-#define MAX_INACTIVE_NUM 2
-#define DEV_DATA_SEG_LEN 0x14
-#define DEV_SENSE_SEG_LEN 0x12
-#define DEV_DES_TYPE 0x80
-#define DEV_ADDITIONAL_LEN 0x10
-
-/* hpb map & entries macro */
-#define HPB_RGN_SIZE_UNIT 512
-#define HPB_ENTRY_BLOCK_SIZE 4096
-#define HPB_ENTRY_SIZE 0x8
-#define PINNED_NOT_SET U32_MAX
-
-/* hpb support chunk size */
-#define HPB_LEGACY_CHUNK_HIGH 1
-#define HPB_MULTI_CHUNK_HIGH 255
-
-/* hpb vendor defined opcode */
-#define UFSHPB_READ 0xF8
-#define UFSHPB_READ_BUFFER 0xF9
-#define UFSHPB_READ_BUFFER_ID 0x01
-#define UFSHPB_WRITE_BUFFER 0xFA
-#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID 0x01
-#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02
-#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID 0x03
-#define HPB_WRITE_BUFFER_CMD_LENGTH 10
-#define MAX_HPB_READ_ID 0x7F
-#define HPB_READ_BUFFER_CMD_LENGTH 10
-#define LU_ENABLED_HPB_FUNC 0x02
-
-#define HPB_RESET_REQ_RETRIES 10
-#define HPB_MAP_REQ_RETRIES 5
-#define HPB_REQUEUE_TIME_MS 0
-
-#define HPB_SUPPORT_VERSION 0x200
-#define HPB_SUPPORT_LEGACY_VERSION 0x100
-
-enum UFSHPB_MODE {
- HPB_HOST_CONTROL,
- HPB_DEVICE_CONTROL,
-};
-
-enum UFSHPB_STATE {
- HPB_INIT = 0,
- HPB_PRESENT = 1,
- HPB_SUSPEND,
- HPB_FAILED,
- HPB_RESET,
-};
-
-enum HPB_RGN_STATE {
- HPB_RGN_INACTIVE,
- HPB_RGN_ACTIVE,
- /* pinned regions are always active */
- HPB_RGN_PINNED,
-};
-
-enum HPB_SRGN_STATE {
- HPB_SRGN_UNUSED,
- HPB_SRGN_INVALID,
- HPB_SRGN_VALID,
- HPB_SRGN_ISSUED,
-};
-
-/**
- * struct ufshpb_lu_info - UFSHPB logical unit related info
- * @num_blocks: the number of logical blocks
- * @pinned_start: the start region number of the pinned region
- * @num_pinned: the number of pinned regions
- * @max_active_rgns: maximum number of active regions
- */
-struct ufshpb_lu_info {
- int num_blocks;
- int pinned_start;
- int num_pinned;
- int max_active_rgns;
-};
-
-struct ufshpb_map_ctx {
- struct page **m_page;
- unsigned long *ppn_dirty;
-};
-
-struct ufshpb_subregion {
- struct ufshpb_map_ctx *mctx;
- enum HPB_SRGN_STATE srgn_state;
- int rgn_idx;
- int srgn_idx;
- bool is_last;
-
- /* subregion reads - for host mode */
- unsigned int reads;
-
- /* below information is used by rsp_list */
- struct list_head list_act_srgn;
-};
-
-struct ufshpb_region {
- struct ufshpb_lu *hpb;
- struct ufshpb_subregion *srgn_tbl;
- enum HPB_RGN_STATE rgn_state;
- int rgn_idx;
- int srgn_cnt;
-
- /* below information is used by rsp_list */
- struct list_head list_inact_rgn;
-
- /* below information is used by lru */
- struct list_head list_lru_rgn;
- unsigned long rgn_flags;
-#define RGN_FLAG_DIRTY 0
-#define RGN_FLAG_UPDATE 1
-
- /* region reads - for host mode */
- spinlock_t rgn_lock;
- unsigned int reads;
- /* region "cold" timer - for host mode */
- ktime_t read_timeout;
- unsigned int read_timeout_expiries;
- struct list_head list_expired_rgn;
-};
-
-#define for_each_sub_region(rgn, i, srgn) \
- for ((i) = 0; \
- ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
- (i)++)
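
The second clause of the iterator both bounds the loop and assigns srgn, so the body never sees an out-of-range pointer. Typical usage (count_valid_srgns is an illustrative caller, not a driver function):

static int count_valid_srgns(struct ufshpb_region *rgn)
{
	struct ufshpb_subregion *srgn;
	int i, valid = 0;

	for_each_sub_region(rgn, i, srgn)
		if (srgn->srgn_state == HPB_SRGN_VALID)
			valid++;

	return valid;
}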
-
-/**
- * struct ufshpb_req - HPB related request structure (write/read buffer)
- * @req: block layer request structure
- * @bio: bio for this request
- * @hpb: the ufshpb_lu structure this request belongs to
- * @list_req: ufshpb_req mempool list
- * @mctx: L2P map information
- * @rgn_idx: target region index
- * @srgn_idx: target sub-region index
- * @lun: target logical unit number
- * @m_page: L2P map information data for pre-request
- * @len: length of host-side cached L2P map in m_page
- * @lpn: start LPN of L2P map in m_page
- */
-struct ufshpb_req {
- struct request *req;
- struct bio *bio;
- struct ufshpb_lu *hpb;
- struct list_head list_req;
- union {
- struct {
- struct ufshpb_map_ctx *mctx;
- unsigned int rgn_idx;
- unsigned int srgn_idx;
- unsigned int lun;
- } rb;
- struct {
- struct page *m_page;
- unsigned int len;
- unsigned long lpn;
- } wb;
- };
-};
-
-struct victim_select_info {
- struct list_head lh_lru_rgn; /* LRU list of regions */
- int max_lru_active_cnt; /* supported hpb #region - pinned #region */
- atomic_t active_cnt;
-};
-
-/**
- * struct ufshpb_params - UFS HPB parameters
- * @requeue_timeout_ms: requeue threshold of wb command (0x2)
- * @activation_thld: min reads [IOs] to activate/update a region
- * @normalization_factor: shift right the region's reads
- * @eviction_thld_enter: min reads [IOs] for the entering region in eviction
- * @eviction_thld_exit: max reads [IOs] for the exiting region in eviction
- * @read_timeout_ms: timeout [ms] from the last read IO to the region
- * @read_timeout_expiries: number of allowed timeout expiries
- * @timeout_polling_interval_ms: frequency at which timeouts are checked
- * @inflight_map_req: number of inflight map requests
- */
-struct ufshpb_params {
- unsigned int requeue_timeout_ms;
- unsigned int activation_thld;
- unsigned int normalization_factor;
- unsigned int eviction_thld_enter;
- unsigned int eviction_thld_exit;
- unsigned int read_timeout_ms;
- unsigned int read_timeout_expiries;
- unsigned int timeout_polling_interval_ms;
- unsigned int inflight_map_req;
-};
-
-struct ufshpb_stats {
- u64 hit_cnt;
- u64 miss_cnt;
- u64 rb_noti_cnt;
- u64 rb_active_cnt;
- u64 rb_inactive_cnt;
- u64 map_req_cnt;
- u64 pre_req_cnt;
- u64 umap_req_cnt;
-};
-
-struct ufshpb_lu {
- int lun;
- struct scsi_device *sdev_ufs_lu;
-
- spinlock_t rgn_state_lock; /* for protect rgn/srgn state */
- struct ufshpb_region *rgn_tbl;
-
- atomic_t hpb_state;
-
- spinlock_t rsp_list_lock;
- struct list_head lh_act_srgn; /* hold rsp_list_lock */
- struct list_head lh_inact_rgn; /* hold rsp_list_lock */
-
- /* pre request information */
- struct ufshpb_req *pre_req;
- int num_inflight_pre_req;
- int throttle_pre_req;
- int num_inflight_map_req; /* hold param_lock */
- spinlock_t param_lock;
-
- struct list_head lh_pre_req_free;
- int pre_req_max_tr_len;
-
- /* cached L2P map management worker */
- struct work_struct map_work;
-
- /* for selecting victim */
- struct victim_select_info lru_info;
- struct work_struct ufshpb_normalization_work;
- struct delayed_work ufshpb_read_to_work;
- unsigned long work_data_bits;
-#define TIMEOUT_WORK_RUNNING 0
-
- /* pinned region information */
- u32 lu_pinned_start;
- u32 lu_pinned_end;
-
- /* HPB related configuration */
- u32 rgns_per_lu;
- u32 srgns_per_lu;
- u32 last_srgn_entries;
- int srgns_per_rgn;
- u32 srgn_mem_size;
- u32 entries_per_rgn_mask;
- u32 entries_per_rgn_shift;
- u32 entries_per_srgn;
- u32 entries_per_srgn_mask;
- u32 entries_per_srgn_shift;
- u32 pages_per_srgn;
-
- bool is_hcm;
-
- struct ufshpb_stats stats;
- struct ufshpb_params params;
-
- struct kmem_cache *map_req_cache;
- struct kmem_cache *m_page_cache;
-
- struct list_head list_hpb_lu;
-};
-
-struct ufs_hba;
-struct ufshcd_lrb;
-
-#ifndef CONFIG_SCSI_UFS_HPB
-static int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; }
-static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
-static void ufshpb_resume(struct ufs_hba *hba) {}
-static void ufshpb_suspend(struct ufs_hba *hba) {}
-static void ufshpb_reset(struct ufs_hba *hba) {}
-static void ufshpb_reset_host(struct ufs_hba *hba) {}
-static void ufshpb_init(struct ufs_hba *hba) {}
-static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
-static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
-static void ufshpb_remove(struct ufs_hba *hba) {}
-static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; }
-static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {}
-static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {}
-static bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; }
-#else
-int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
-void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
-void ufshpb_resume(struct ufs_hba *hba);
-void ufshpb_suspend(struct ufs_hba *hba);
-void ufshpb_reset(struct ufs_hba *hba);
-void ufshpb_reset_host(struct ufs_hba *hba);
-void ufshpb_init(struct ufs_hba *hba);
-void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
-void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
-void ufshpb_remove(struct ufs_hba *hba);
-bool ufshpb_is_allowed(struct ufs_hba *hba);
-void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
-void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
-bool ufshpb_is_legacy(struct ufs_hba *hba);
-extern struct attribute_group ufs_sysfs_hpb_stat_group;
-extern struct attribute_group ufs_sysfs_hpb_param_group;
-#endif
-
-#endif /* End of Header */
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
deleted file mode 100644
index 8e9e486a4f7b..000000000000
--- a/drivers/scsi/ufs/unipro.h
+++ /dev/null
@@ -1,332 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * drivers/scsi/ufs/unipro.h
- *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- */
-
-#ifndef _UNIPRO_H_
-#define _UNIPRO_H_
-
-/*
- * M-TX Configuration Attributes
- */
-#define TX_HIBERN8TIME_CAPABILITY 0x000F
-#define TX_MODE 0x0021
-#define TX_HSRATE_SERIES 0x0022
-#define TX_HSGEAR 0x0023
-#define TX_PWMGEAR 0x0024
-#define TX_AMPLITUDE 0x0025
-#define TX_HS_SLEWRATE 0x0026
-#define TX_SYNC_SOURCE 0x0027
-#define TX_HS_SYNC_LENGTH 0x0028
-#define TX_HS_PREPARE_LENGTH 0x0029
-#define TX_LS_PREPARE_LENGTH 0x002A
-#define TX_HIBERN8_CONTROL 0x002B
-#define TX_LCC_ENABLE 0x002C
-#define TX_PWM_BURST_CLOSURE_EXTENSION 0x002D
-#define TX_BYPASS_8B10B_ENABLE 0x002E
-#define TX_DRIVER_POLARITY 0x002F
-#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE 0x0030
-#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE 0x0031
-#define TX_LCC_SEQUENCER 0x0032
-#define TX_MIN_ACTIVATETIME 0x0033
-#define TX_PWM_G6_G7_SYNC_LENGTH 0x0034
-#define TX_REFCLKFREQ 0x00EB
-#define TX_CFGCLKFREQVAL 0x00EC
-#define CFGEXTRATTR 0x00F0
-#define DITHERCTRL2 0x00F1
-
-/*
- * M-RX Configuration Attributes
- */
-#define RX_MODE 0x00A1
-#define RX_HSRATE_SERIES 0x00A2
-#define RX_HSGEAR 0x00A3
-#define RX_PWMGEAR 0x00A4
-#define RX_LS_TERMINATED_ENABLE 0x00A5
-#define RX_HS_UNTERMINATED_ENABLE 0x00A6
-#define RX_ENTER_HIBERN8 0x00A7
-#define RX_BYPASS_8B10B_ENABLE 0x00A8
-#define RX_TERMINATION_FORCE_ENABLE 0x00A9
-#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
-#define RX_HIBERN8TIME_CAPABILITY 0x0092
-#define RX_REFCLKFREQ 0x00EB
-#define RX_CFGCLKFREQVAL 0x00EC
-#define CFGWIDEINLN 0x00F0
-#define CFGRXCDR8 0x00BA
-#define ENARXDIRECTCFG4 0x00F2
-#define CFGRXOVR8 0x00BD
-#define RXDIRECTCTRL2 0x00C7
-#define ENARXDIRECTCFG3 0x00F3
-#define RXCALCTRL 0x00B4
-#define ENARXDIRECTCFG2 0x00F4
-#define CFGRXOVR4 0x00E9
-#define RXSQCTRL 0x00B5
-#define CFGRXOVR6 0x00BF
-#define RX_HS_G1_SYNC_LENGTH_CAP 0x008B
-#define RX_HS_G1_PREP_LENGTH_CAP 0x008C
-#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094
-#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095
-#define RX_HS_G2_PREP_LENGTH_CAP 0x0096
-#define RX_HS_G3_PREP_LENGTH_CAP 0x0097
-#define RX_ADV_GRANULARITY_CAP 0x0098
-#define RX_MIN_ACTIVATETIME_CAP 0x008F
-#define RX_HIBERN8TIME_CAP 0x0092
-#define RX_ADV_HIBERN8TIME_CAP 0x0099
-#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A
-
-
-#define is_mphy_tx_attr(attr) (attr < RX_MODE)
-#define RX_ADV_FINE_GRAN_STEP(x) ((((x) & 0x3) << 1) | 0x1)
-#define SYNC_LEN_FINE(x) ((x) & 0x3F)
-#define SYNC_LEN_COARSE(x) ((1 << 6) | ((x) & 0x3F))
-#define PREP_LEN(x) ((x) & 0xF)
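
These helpers pack M-PHY capability fields; a few worked values, computed directly from the macros above:

/*
 *   SYNC_LEN_FINE(0x10)      = 0x10 & 0x3F          = 0x10
 *   SYNC_LEN_COARSE(0x10)    = (1 << 6) | 0x10      = 0x50
 *   RX_ADV_FINE_GRAN_STEP(2) = ((2 & 0x3) << 1) | 1 = 0x5
 *   PREP_LEN(0x1F)           = 0x1F & 0xF           = 0xF
 */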
-
-#define RX_MIN_ACTIVATETIME_UNIT_US 100
-#define HIBERN8TIME_UNIT_US 100
-
-/*
- * Common Block Attributes
- */
-#define UNIPRO_CB_OFFSET(x)		(0x8000 | x)
-
-#define TX_GLOBALHIBERNATE		UNIPRO_CB_OFFSET(0x002B)
-#define REFCLKMODE			UNIPRO_CB_OFFSET(0x00BF)
-#define DIRECTCTRL19			UNIPRO_CB_OFFSET(0x00CD)
-#define DIRECTCTRL10			UNIPRO_CB_OFFSET(0x00E6)
-#define CDIRECTCTRL6			UNIPRO_CB_OFFSET(0x00EA)
-#define RTOBSERVESELECT			UNIPRO_CB_OFFSET(0x00F0)
-#define CBDIVFACTOR			UNIPRO_CB_OFFSET(0x00F1)
-#define CBDCOCTRL5			UNIPRO_CB_OFFSET(0x00F3)
-#define CBPRGPLL2			UNIPRO_CB_OFFSET(0x00F8)
-#define CBPRGTUNING			UNIPRO_CB_OFFSET(0x00FB)
-
-/*
- * PHY Adapter attributes
- */
-#define PA_ACTIVETXDATALANES 0x1560
-#define PA_ACTIVERXDATALANES 0x1580
-#define PA_TXTRAILINGCLOCKS 0x1564
-#define PA_PHY_TYPE 0x1500
-#define PA_AVAILTXDATALANES 0x1520
-#define PA_AVAILRXDATALANES 0x1540
-#define PA_MINRXTRAILINGCLOCKS 0x1543
-#define PA_TXPWRSTATUS 0x1567
-#define PA_RXPWRSTATUS 0x1582
-#define PA_TXFORCECLOCK 0x1562
-#define PA_TXPWRMODE 0x1563
-#define PA_LEGACYDPHYESCDL 0x1570
-#define PA_MAXTXSPEEDFAST 0x1521
-#define PA_MAXTXSPEEDSLOW 0x1522
-#define PA_MAXRXSPEEDFAST 0x1541
-#define PA_MAXRXSPEEDSLOW 0x1542
-#define PA_TXLINKSTARTUPHS 0x1544
-#define PA_LOCAL_TX_LCC_ENABLE 0x155E
-#define PA_TXSPEEDFAST 0x1565
-#define PA_TXSPEEDSLOW 0x1566
-#define PA_REMOTEVERINFO 0x15A0
-#define PA_TXGEAR 0x1568
-#define PA_TXTERMINATION 0x1569
-#define PA_HSSERIES 0x156A
-#define PA_PWRMODE 0x1571
-#define PA_RXGEAR 0x1583
-#define PA_RXTERMINATION 0x1584
-#define PA_MAXRXPWMGEAR 0x1586
-#define PA_MAXRXHSGEAR 0x1587
-#define PA_RXHSUNTERMCAP 0x15A5
-#define PA_RXLSTERMCAP 0x15A6
-#define PA_GRANULARITY 0x15AA
-#define PA_PACPREQTIMEOUT 0x1590
-#define PA_PACPREQEOBTIMEOUT 0x1591
-#define PA_HIBERN8TIME 0x15A7
-#define PA_LOCALVERINFO 0x15A9
-#define PA_TACTIVATE 0x15A8
-#define PA_PACPFRAMECOUNT 0x15C0
-#define PA_PACPERRORCOUNT 0x15C1
-#define PA_PHYTESTCONTROL 0x15C2
-#define PA_PWRMODEUSERDATA0 0x15B0
-#define PA_PWRMODEUSERDATA1 0x15B1
-#define PA_PWRMODEUSERDATA2 0x15B2
-#define PA_PWRMODEUSERDATA3 0x15B3
-#define PA_PWRMODEUSERDATA4 0x15B4
-#define PA_PWRMODEUSERDATA5 0x15B5
-#define PA_PWRMODEUSERDATA6 0x15B6
-#define PA_PWRMODEUSERDATA7 0x15B7
-#define PA_PWRMODEUSERDATA8 0x15B8
-#define PA_PWRMODEUSERDATA9 0x15B9
-#define PA_PWRMODEUSERDATA10 0x15BA
-#define PA_PWRMODEUSERDATA11 0x15BB
-#define PA_CONNECTEDTXDATALANES 0x1561
-#define PA_CONNECTEDRXDATALANES 0x1581
-#define PA_LOGICALLANEMAP 0x15A1
-#define PA_SLEEPNOCONFIGTIME 0x15A2
-#define PA_STALLNOCONFIGTIME 0x15A3
-#define PA_SAVECONFIGTIME 0x15A4
-#define PA_TXHSADAPTTYPE 0x15D4
-
-/* Adapt type for PA_TXHSADAPTTYPE attribute */
-#define PA_REFRESH_ADAPT 0x00
-#define PA_INITIAL_ADAPT 0x01
-#define PA_NO_ADAPT 0x03
-
-#define PA_TACTIVATE_TIME_UNIT_US 10
-#define PA_HIBERN8_TIME_UNIT_US 100
-
-/* Other attributes */
-#define VS_MPHYCFGUPDT 0xD085
-#define VS_DEBUGOMC 0xD09E
-#define VS_POWERSTATE 0xD083
-
-#define PA_GRANULARITY_MIN_VAL 1
-#define PA_GRANULARITY_MAX_VAL 6
-
-/* PHY Adapter Protocol Constants */
-#define PA_MAXDATALANES 4
-
-#define DL_FC0ProtectionTimeOutVal_Default 8191
-#define DL_TC0ReplayTimeOutVal_Default 65535
-#define DL_AFC0ReqTimeOutVal_Default 32767
-#define DL_FC1ProtectionTimeOutVal_Default 8191
-#define DL_TC1ReplayTimeOutVal_Default 65535
-#define DL_AFC1ReqTimeOutVal_Default 32767
-
-#define DME_LocalFC0ProtectionTimeOutVal 0xD041
-#define DME_LocalTC0ReplayTimeOutVal 0xD042
-#define DME_LocalAFC0ReqTimeOutVal 0xD043
-
-/* PA power modes */
-enum {
- FAST_MODE = 1,
- SLOW_MODE = 2,
- FASTAUTO_MODE = 4,
- SLOWAUTO_MODE = 5,
- UNCHANGED = 7,
-};
-
-#define PWRMODE_MASK 0xF
-#define PWRMODE_RX_OFFSET 4
-
-/* PA TX/RX Frequency Series */
-enum {
- PA_HS_MODE_A = 1,
- PA_HS_MODE_B = 2,
-};
-
-enum ufs_pwm_gear_tag {
- UFS_PWM_DONT_CHANGE, /* Don't change Gear */
- UFS_PWM_G1, /* PWM Gear 1 (default for reset) */
- UFS_PWM_G2, /* PWM Gear 2 */
- UFS_PWM_G3, /* PWM Gear 3 */
- UFS_PWM_G4, /* PWM Gear 4 */
- UFS_PWM_G5, /* PWM Gear 5 */
- UFS_PWM_G6, /* PWM Gear 6 */
- UFS_PWM_G7, /* PWM Gear 7 */
-};
-
-enum ufs_hs_gear_tag {
- UFS_HS_DONT_CHANGE, /* Don't change Gear */
- UFS_HS_G1, /* HS Gear 1 (default for reset) */
- UFS_HS_G2, /* HS Gear 2 */
- UFS_HS_G3, /* HS Gear 3 */
- UFS_HS_G4, /* HS Gear 4 */
-};
-
-enum ufs_unipro_ver {
- UFS_UNIPRO_VER_RESERVED = 0,
- UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
- UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
- UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
- UFS_UNIPRO_VER_1_61 = 4, /* UniPro version 1.61 */
- UFS_UNIPRO_VER_1_8 = 5, /* UniPro version 1.8 */
- UFS_UNIPRO_VER_MAX = 6, /* UniPro unsupported version */
- /* UniPro version field mask in PA_LOCALVERINFO */
- UFS_UNIPRO_VER_MASK = 0xF,
-};
-
-/*
- * Data Link Layer Attributes
- */
-#define DL_TC0TXFCTHRESHOLD 0x2040
-#define DL_FC0PROTTIMEOUTVAL 0x2041
-#define DL_TC0REPLAYTIMEOUTVAL 0x2042
-#define DL_AFC0REQTIMEOUTVAL 0x2043
-#define DL_AFC0CREDITTHRESHOLD 0x2044
-#define DL_TC0OUTACKTHRESHOLD 0x2045
-#define DL_TC1TXFCTHRESHOLD 0x2060
-#define DL_FC1PROTTIMEOUTVAL 0x2061
-#define DL_TC1REPLAYTIMEOUTVAL 0x2062
-#define DL_AFC1REQTIMEOUTVAL 0x2063
-#define DL_AFC1CREDITTHRESHOLD 0x2064
-#define DL_TC1OUTACKTHRESHOLD 0x2065
-#define DL_TXPREEMPTIONCAP 0x2000
-#define DL_TC0TXMAXSDUSIZE 0x2001
-#define DL_TC0RXINITCREDITVAL 0x2002
-#define DL_TC0TXBUFFERSIZE 0x2005
-#define DL_PEERTC0PRESENT 0x2046
-#define DL_PEERTC0RXINITCREVAL 0x2047
-#define DL_TC1TXMAXSDUSIZE 0x2003
-#define DL_TC1RXINITCREDITVAL 0x2004
-#define DL_TC1TXBUFFERSIZE 0x2006
-#define DL_PEERTC1PRESENT 0x2066
-#define DL_PEERTC1RXINITCREVAL 0x2067
-
-/*
- * Network Layer Attributes
- */
-#define N_DEVICEID 0x3000
-#define N_DEVICEID_VALID 0x3001
-#define N_TC0TXMAXSDUSIZE 0x3020
-#define N_TC1TXMAXSDUSIZE 0x3021
-
-/*
- * Transport Layer Attributes
- */
-#define T_NUMCPORTS 0x4000
-#define T_NUMTESTFEATURES 0x4001
-#define T_CONNECTIONSTATE 0x4020
-#define T_PEERDEVICEID 0x4021
-#define T_PEERCPORTID 0x4022
-#define T_TRAFFICCLASS 0x4023
-#define T_PROTOCOLID 0x4024
-#define T_CPORTFLAGS 0x4025
-#define T_TXTOKENVALUE 0x4026
-#define T_RXTOKENVALUE 0x4027
-#define T_LOCALBUFFERSPACE 0x4028
-#define T_PEERBUFFERSPACE 0x4029
-#define T_CREDITSTOSEND 0x402A
-#define T_CPORTMODE 0x402B
-#define T_TC0TXMAXSDUSIZE 0x4060
-#define T_TC1TXMAXSDUSIZE 0x4061
-
-#ifdef FALSE
-#undef FALSE
-#endif
-
-#ifdef TRUE
-#undef TRUE
-#endif
-
-/* Boolean attribute values */
-enum {
- FALSE = 0,
- TRUE,
-};
-
-/* CPort setting */
-#define E2EFC_ON (1 << 0)
-#define E2EFC_OFF (0 << 0)
-#define CSD_N_ON (0 << 1)
-#define CSD_N_OFF (1 << 1)
-#define CSV_N_ON (0 << 2)
-#define CSV_N_OFF (1 << 2)
-#define CPORT_DEF_FLAGS (CSV_N_OFF | CSD_N_OFF | E2EFC_OFF)
-
-/* CPort connection state */
-enum {
- CPORT_IDLE = 0,
- CPORT_CONNECTED,
-};
-
-#endif /* _UNIPRO_H_ */
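The header above is removed wholesale; its attribute IDs are consumed through the DME get/set primitives of the UFS host core. A minimal sketch of that usage, assuming the ufshcd helpers ufshcd_dme_get() and UIC_ARG_MIB(), which live outside this diff:

	u32 granularity = 0;

	/* Read PA_GRANULARITY and check it against the bounds defined above. */
	if (!ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity) &&
	    (granularity < PA_GRANULARITY_MIN_VAL ||
	     granularity > PA_GRANULARITY_MAX_VAL))
		dev_err(hba->dev, "invalid PA_GRANULARITY %u\n", granularity);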
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 28e1d98ae102..2a79ab16134b 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -141,10 +141,10 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
break;
case VIRTIO_SCSI_S_TARGET_FAILURE:
- set_host_byte(sc, DID_TARGET_FAILURE);
+ set_host_byte(sc, DID_BAD_TARGET);
break;
case VIRTIO_SCSI_S_NEXUS_FAILURE:
- set_host_byte(sc, DID_NEXUS_FAILURE);
+ set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);
break;
default:
scmd_printk(KERN_WARNING, sc, "Unknown response %d",
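With DID_TARGET_FAILURE and DID_NEXUS_FAILURE gone as host bytes, a target failure is reported as DID_BAD_TARGET and a nexus failure as a SAM reservation conflict in the status byte. A sketch of the result encoding this relies on (host byte in bits 16..23, SAM status in bits 0..7 of sc->result; treat the exact masks as an assumption):

	set_host_byte(sc, DID_BAD_TARGET);			/* bits 16..23 */
	set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT);	/* bits 0..7 */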
@@ -528,7 +528,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
if (!rq || !scsi_prot_sg_count(sc))
return;
- bi = blk_get_integrity(rq->rq_disk);
+ bi = blk_get_integrity(rq->q->disk);
if (sc->sc_data_direction == DMA_TO_DEVICE)
cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
@@ -711,12 +711,12 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
-static int virtscsi_map_queues(struct Scsi_Host *shost)
+static void virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+ blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
}
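The map_queues hook now returns void, matching the midlayer's updated prototype, so the mapping helper's result is simply not propagated. For reference, a sketch of the corresponding host-template member (template name hypothetical; only the prototype is the point):

	static struct scsi_host_template demo_template = {
		.map_queues	= virtscsi_map_queues,	/* void (*)(struct Scsi_Host *) */
	};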
static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
@@ -778,7 +778,7 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
/* Stop all the virtqueues. */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
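The open-coded config->reset() call is replaced by the virtio core wrapper. To the best of my knowledge the wrapper amounts to the following thin inline (treat as a sketch, not a quote of <linux/virtio_config.h>):

	static inline void virtio_reset_device(struct virtio_device *dev)
	{
		dev->config->reset(dev);	/* one audited entry point for resets */
	}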
@@ -988,7 +988,7 @@ static struct virtio_driver virtio_scsi_driver = {
.remove = virtscsi_remove,
};
-static int __init init(void)
+static int __init virtio_scsi_init(void)
{
int ret = -ENOMEM;
@@ -1020,14 +1020,14 @@ error:
return ret;
}
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 1f037b8ab904..f88ecdb93a8a 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1324,7 +1324,6 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
* indicate success.
*/
header = config_page;
- memset(header, 0, sizeof *header);
header->hostStatus = BTSTAT_INVPARAM;
header->scsiStatus = SDSTAT_CHECK;
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 51a82f7803d3..9d16cf925483 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc {
u8 tag;
u8 bus;
u8 target;
- u8 vcpuHint;
- u8 unused[59];
+ u16 vcpuHint;
+ u8 unused[58];
} __packed;
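Widening vcpuHint to u16 while dropping one padding byte keeps the descriptor's on-ring layout: the struct is __packed, so the u16 covers the old byte plus one byte of the former padding with no alignment hole. A compile-time guard one could add; the 128-byte total is summed from the field list here, an assumption rather than something this diff states:

	static_assert(sizeof(struct PVSCSIRingReqDesc) == 128,
		      "request descriptor must keep its on-ring size");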
/*
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 7d2f00f3571a..e4fafc77bd20 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -162,65 +162,6 @@ module_param(setup_strings, charp, 0);
static void wd33c93_execute(struct Scsi_Host *instance);
-#ifdef CONFIG_WD33C93_PIO
-static inline uchar
-read_wd33c93(const wd33c93_regs regs, uchar reg_num)
-{
- uchar data;
-
- outb(reg_num, regs.SASR);
- data = inb(regs.SCMD);
- return data;
-}
-
-static inline unsigned long
-read_wd33c93_count(const wd33c93_regs regs)
-{
- unsigned long value;
-
- outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
- value = inb(regs.SCMD) << 16;
- value |= inb(regs.SCMD) << 8;
- value |= inb(regs.SCMD);
- return value;
-}
-
-static inline uchar
-read_aux_stat(const wd33c93_regs regs)
-{
- return inb(regs.SASR);
-}
-
-static inline void
-write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value)
-{
- outb(reg_num, regs.SASR);
- outb(value, regs.SCMD);
-}
-
-static inline void
-write_wd33c93_count(const wd33c93_regs regs, unsigned long value)
-{
- outb(WD_TRANSFER_COUNT_MSB, regs.SASR);
- outb((value >> 16) & 0xff, regs.SCMD);
- outb((value >> 8) & 0xff, regs.SCMD);
- outb( value & 0xff, regs.SCMD);
-}
-
-#define write_wd33c93_cmd(regs, cmd) \
- write_wd33c93((regs), WD_COMMAND, (cmd))
-
-static inline void
-write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
-{
- int i;
-
- outb(WD_CDB_1, regs.SASR);
- for (i=0; i<len; i++)
- outb(cmnd[i], regs.SCMD);
-}
-
-#else /* CONFIG_WD33C93_PIO */
static inline uchar
read_wd33c93(const wd33c93_regs regs, uchar reg_num)
{
@@ -287,7 +228,6 @@ write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[])
for (i = 0; i < len; i++)
*regs.SCMD = cmnd[i];
}
-#endif /* CONFIG_WD33C93_PIO */
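This deletes the port-I/O (outb/inb) flavour of the register accessors, leaving only the memory-mapped variants, so wd33c93_regs always carries pointers. A reconstructed sketch of the surviving read path (the retained branch continues below; any memory barriers are omitted here):

	static inline uchar
	read_wd33c93(const wd33c93_regs regs, uchar reg_num)
	{
		*regs.SASR = reg_num;	/* select the chip register via SASR */
		return *regs.SCMD;	/* then read its value through SCMD */
	}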
static inline uchar
read_1_byte(const wd33c93_regs regs)
@@ -364,6 +304,7 @@ calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast,
static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct WD33C93_hostdata *hostdata;
struct scsi_cmnd *tmp;
@@ -395,15 +336,15 @@ static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd)
*/
if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
+ scsi_pointer->buffer = scsi_sglist(cmd);
+ scsi_pointer->buffers_residual = scsi_sg_count(cmd) - 1;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
} else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = NULL;
- cmd->SCp.this_residual = 0;
+ scsi_pointer->buffer = NULL;
+ scsi_pointer->buffers_residual = 0;
+ scsi_pointer->ptr = NULL;
+ scsi_pointer->this_residual = 0;
}
/* WD docs state that at the conclusion of a "LEVEL2" command, the
@@ -423,7 +364,7 @@ static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd)
* status byte is stored.
*/
- cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
+ scsi_pointer->Status = ILLEGAL_STATUS_BYTE;
/*
* Add the cmd to the end of 'input_Q'. Note that REQUEST SENSE
@@ -470,6 +411,7 @@ DEF_SCSI_QCMD(wd33c93_queuecommand)
static void
wd33c93_execute(struct Scsi_Host *instance)
{
+ struct scsi_pointer *scsi_pointer;
struct WD33C93_hostdata *hostdata =
(struct WD33C93_hostdata *) instance->hostdata;
const wd33c93_regs regs = hostdata->regs;
@@ -546,7 +488,8 @@ wd33c93_execute(struct Scsi_Host *instance)
* to change around and experiment with for now.
*/
- cmd->SCp.phase = 0; /* assume no disconnect */
+ scsi_pointer = WD33C93_scsi_pointer(cmd);
+ scsi_pointer->phase = 0; /* assume no disconnect */
if (hostdata->disconnect == DIS_NEVER)
goto no;
if (hostdata->disconnect == DIS_ALWAYS)
@@ -563,7 +506,7 @@ wd33c93_execute(struct Scsi_Host *instance)
(prev->device->lun != cmd->device->lun)) {
for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev;
prev = (struct scsi_cmnd *) prev->host_scribble)
- prev->SCp.phase = 1;
+ WD33C93_scsi_pointer(prev)->phase = 1;
goto yes;
}
}
@@ -571,7 +514,7 @@ wd33c93_execute(struct Scsi_Host *instance)
goto no;
yes:
- cmd->SCp.phase = 1;
+ scsi_pointer->phase = 1;
#ifdef PROC_STATISTICS
hostdata->disc_allowed_cnt[cmd->device->id]++;
@@ -579,7 +522,7 @@ wd33c93_execute(struct Scsi_Host *instance)
no:
- write_wd33c93(regs, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
+ write_wd33c93(regs, WD_SOURCE_ID, scsi_pointer->phase ? SRCID_ER : 0);
write_wd33c93(regs, WD_TARGET_LUN, (u8)cmd->device->lun);
write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
@@ -648,14 +591,14 @@ wd33c93_execute(struct Scsi_Host *instance)
* up ahead of time.
*/
- if ((cmd->SCp.phase == 0) && (hostdata->no_dma == 0)) {
+ if (scsi_pointer->phase == 0 && hostdata->no_dma == 0) {
if (hostdata->dma_setup(cmd,
(cmd->sc_data_direction == DMA_TO_DEVICE) ?
DATA_OUT_DIR : DATA_IN_DIR))
write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */
else {
write_wd33c93_count(regs,
- cmd->SCp.this_residual);
+ scsi_pointer->this_residual);
write_wd33c93(regs, WD_CONTROL,
CTRL_IDI | CTRL_EDI | hostdata->dma_mode);
hostdata->dma = D_DMA_RUNNING;
@@ -675,7 +618,7 @@ wd33c93_execute(struct Scsi_Host *instance)
*/
DB(DB_EXECUTE,
- printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
+ printk("%s)EX-2 ", scsi_pointer->phase ? "d:" : ""))
}
static void
@@ -717,6 +660,7 @@ static void
transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
int data_in_dir)
{
+ struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
struct WD33C93_hostdata *hostdata;
unsigned long length;
@@ -730,13 +674,13 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
* now we need to setup the next scatter-gather buffer as the
* source or destination for THIS transfer.
*/
- if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
+ if (!scsi_pointer->this_residual && scsi_pointer->buffers_residual) {
+ scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
+ --scsi_pointer->buffers_residual;
+ scsi_pointer->this_residual = scsi_pointer->buffer->length;
+ scsi_pointer->ptr = sg_virt(scsi_pointer->buffer);
}
- if (!cmd->SCp.this_residual) /* avoid bogus setups */
+ if (!scsi_pointer->this_residual) /* avoid bogus setups */
return;
write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
@@ -750,11 +694,12 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
#ifdef PROC_STATISTICS
hostdata->pio_cnt++;
#endif
- transfer_pio(regs, (uchar *) cmd->SCp.ptr,
- cmd->SCp.this_residual, data_in_dir, hostdata);
- length = cmd->SCp.this_residual;
- cmd->SCp.this_residual = read_wd33c93_count(regs);
- cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+ transfer_pio(regs, (uchar *) scsi_pointer->ptr,
+ scsi_pointer->this_residual, data_in_dir,
+ hostdata);
+ length = scsi_pointer->this_residual;
+ scsi_pointer->this_residual = read_wd33c93_count(regs);
+ scsi_pointer->ptr += length - scsi_pointer->this_residual;
}
/* We are able to do DMA (in fact, the Amiga hardware is
@@ -771,10 +716,10 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
hostdata->dma_cnt++;
#endif
write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode);
- write_wd33c93_count(regs, cmd->SCp.this_residual);
+ write_wd33c93_count(regs, scsi_pointer->this_residual);
if ((hostdata->level2 >= L2_DATA) ||
- (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
+ (hostdata->level2 == L2_BASIC && scsi_pointer->phase == 0)) {
write_wd33c93(regs, WD_COMMAND_PHASE, 0x45);
write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER);
hostdata->state = S_RUNNING_LEVEL2;
@@ -788,6 +733,7 @@ transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd,
void
wd33c93_intr(struct Scsi_Host *instance)
{
+ struct scsi_pointer *scsi_pointer;
struct WD33C93_hostdata *hostdata =
(struct WD33C93_hostdata *) instance->hostdata;
const wd33c93_regs regs = hostdata->regs;
@@ -806,6 +752,7 @@ wd33c93_intr(struct Scsi_Host *instance)
#endif
cmd = (struct scsi_cmnd *) hostdata->connected; /* assume we're connected */
+ scsi_pointer = WD33C93_scsi_pointer(cmd);
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear the interrupt */
phs = read_wd33c93(regs, WD_COMMAND_PHASE);
@@ -827,14 +774,14 @@ wd33c93_intr(struct Scsi_Host *instance)
*/
if (hostdata->dma == D_DMA_RUNNING) {
DB(DB_TRANSFER,
- printk("[%p/%d:", cmd->SCp.ptr, cmd->SCp.this_residual))
+ printk("[%p/%d:", scsi_pointer->ptr, scsi_pointer->this_residual))
hostdata->dma_stop(cmd->device->host, cmd, 1);
hostdata->dma = D_DMA_OFF;
- length = cmd->SCp.this_residual;
- cmd->SCp.this_residual = read_wd33c93_count(regs);
- cmd->SCp.ptr += (length - cmd->SCp.this_residual);
+ length = scsi_pointer->this_residual;
+ scsi_pointer->this_residual = read_wd33c93_count(regs);
+ scsi_pointer->ptr += length - scsi_pointer->this_residual;
DB(DB_TRANSFER,
- printk("%p/%d]", cmd->SCp.ptr, cmd->SCp.this_residual))
+ printk("%p/%d]", scsi_pointer->ptr, scsi_pointer->this_residual))
}
/* Respond to the specific WD3393 interrupt - there are quite a few! */
@@ -884,7 +831,7 @@ wd33c93_intr(struct Scsi_Host *instance)
/* construct an IDENTIFY message with correct disconnect bit */
hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun);
- if (cmd->SCp.phase)
+ if (scsi_pointer->phase)
hostdata->outgoing_msg[0] |= 0x40;
if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
@@ -926,8 +873,8 @@ wd33c93_intr(struct Scsi_Host *instance)
case CSR_UNEXP | PHS_DATA_IN:
case CSR_SRV_REQ | PHS_DATA_IN:
DB(DB_INTR,
- printk("IN-%d.%d", cmd->SCp.this_residual,
- cmd->SCp.buffers_residual))
+ printk("IN-%d.%d", scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual))
transfer_bytes(regs, cmd, DATA_IN_DIR);
if (hostdata->state != S_RUNNING_LEVEL2)
hostdata->state = S_CONNECTED;
@@ -938,8 +885,8 @@ wd33c93_intr(struct Scsi_Host *instance)
case CSR_UNEXP | PHS_DATA_OUT:
case CSR_SRV_REQ | PHS_DATA_OUT:
DB(DB_INTR,
- printk("OUT-%d.%d", cmd->SCp.this_residual,
- cmd->SCp.buffers_residual))
+ printk("OUT-%d.%d", scsi_pointer->this_residual,
+ scsi_pointer->buffers_residual))
transfer_bytes(regs, cmd, DATA_OUT_DIR);
if (hostdata->state != S_RUNNING_LEVEL2)
hostdata->state = S_CONNECTED;
@@ -962,8 +909,8 @@ wd33c93_intr(struct Scsi_Host *instance)
case CSR_UNEXP | PHS_STATUS:
case CSR_SRV_REQ | PHS_STATUS:
DB(DB_INTR, printk("STATUS="))
- cmd->SCp.Status = read_1_byte(regs);
- DB(DB_INTR, printk("%02x", cmd->SCp.Status))
+ scsi_pointer->Status = read_1_byte(regs);
+ DB(DB_INTR, printk("%02x", scsi_pointer->Status))
if (hostdata->level2 >= L2_BASIC) {
sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */
udelay(7);
@@ -991,7 +938,7 @@ wd33c93_intr(struct Scsi_Host *instance)
else
hostdata->incoming_ptr = 0;
- cmd->SCp.Message = msg;
+ scsi_pointer->Message = msg;
switch (msg) {
case COMMAND_COMPLETE:
@@ -1163,21 +1110,21 @@ wd33c93_intr(struct Scsi_Host *instance)
write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER);
if (phs == 0x60) {
DB(DB_INTR, printk("SX-DONE"))
- cmd->SCp.Message = COMMAND_COMPLETE;
+ scsi_pointer->Message = COMMAND_COMPLETE;
lun = read_wd33c93(regs, WD_TARGET_LUN);
- DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
+ DB(DB_INTR, printk(":%d.%d", scsi_pointer->Status, lun))
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
- cmd->SCp.Status = lun;
+ if (scsi_pointer->Status == ILLEGAL_STATUS_BYTE)
+ scsi_pointer->Status = lun;
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != SAM_STAT_GOOD) {
+ && scsi_pointer->Status != SAM_STAT_GOOD) {
set_host_byte(cmd, DID_ERROR);
} else {
set_host_byte(cmd, DID_OK);
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
- set_status_byte(cmd, cmd->SCp.Status);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
}
scsi_done(cmd);
@@ -1259,12 +1206,12 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
if (cmd->cmnd[0] == REQUEST_SENSE &&
- cmd->SCp.Status != SAM_STAT_GOOD) {
+ scsi_pointer->Status != SAM_STAT_GOOD) {
set_host_byte(cmd, DID_ERROR);
} else {
set_host_byte(cmd, DID_OK);
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
- set_status_byte(cmd, cmd->SCp.Status);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
}
scsi_done(cmd);
@@ -1293,14 +1240,14 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- DB(DB_INTR, printk(":%d", cmd->SCp.Status))
+ DB(DB_INTR, printk(":%d", scsi_pointer->Status))
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != SAM_STAT_GOOD) {
+ && scsi_pointer->Status != SAM_STAT_GOOD) {
set_host_byte(cmd, DID_ERROR);
} else {
set_host_byte(cmd, DID_OK);
- scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
- set_status_byte(cmd, cmd->SCp.Status);
+ scsi_msg_to_host_byte(cmd, scsi_pointer->Message);
+ set_status_byte(cmd, scsi_pointer->Status);
}
scsi_done(cmd);
break;
diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h
index 2edec34c5a42..e5e4254b1477 100644
--- a/drivers/scsi/wd33c93.h
+++ b/drivers/scsi/wd33c93.h
@@ -180,13 +180,8 @@
/* This is what the 3393 chip looks like to us */
typedef struct {
-#ifdef CONFIG_WD33C93_PIO
- unsigned int SASR;
- unsigned int SCMD;
-#else
volatile unsigned char *SASR;
volatile unsigned char *SCMD;
-#endif
} wd33c93_regs;
@@ -262,6 +257,10 @@ struct WD33C93_hostdata {
#endif
};
+static inline struct scsi_pointer *WD33C93_scsi_pointer(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
/* defines for hostdata->chip */
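The SCp-to-scsi_cmd_priv() conversion above works because the midlayer allocates cmd_size bytes of driver-private data alongside every scsi_cmnd; WD33C93_scsi_pointer() just hands that area back. A hedged sketch of the host-template side the wd33c93 glue drivers need (names illustrative):

	static struct scsi_host_template demo_wd33c93_template = {
		.module		= THIS_MODULE,
		.queuecommand	= wd33c93_queuecommand,
		.cmd_size	= sizeof(struct scsi_pointer),	/* backs scsi_cmd_priv() */
	};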
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 1a7947554581..ff1b22077251 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -196,7 +196,7 @@ static void wd719x_finish_cmd(struct wd719x_scb *scb, int result)
dma_unmap_single(&wd->pdev->dev, scb->phys,
sizeof(struct wd719x_scb), DMA_BIDIRECTIONAL);
scsi_dma_unmap(cmd);
- dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ dma_unmap_single(&wd->pdev->dev, scb->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
cmd->result = result << 16;
@@ -229,11 +229,11 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
/* map sense buffer */
scb->sense_buf_length = SCSI_SENSE_BUFFERSIZE;
- cmd->SCp.dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&wd->pdev->dev, cmd->SCp.dma_handle))
+ scb->dma_handle = dma_map_single(&wd->pdev->dev, cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&wd->pdev->dev, scb->dma_handle))
goto out_unmap_scb;
- scb->sense_buf = cpu_to_le32(cmd->SCp.dma_handle);
+ scb->sense_buf = cpu_to_le32(scb->dma_handle);
/* request autosense */
scb->SCB_options |= WD719X_SCB_FLAGS_AUTO_REQUEST_SENSE;
@@ -288,7 +288,7 @@ static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return 0;
out_unmap_sense:
- dma_unmap_single(&wd->pdev->dev, cmd->SCp.dma_handle,
+ dma_unmap_single(&wd->pdev->dev, scb->dma_handle,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
out_unmap_scb:
dma_unmap_single(&wd->pdev->dev, scb->phys, sizeof(*scb),
@@ -904,7 +904,8 @@ static int wd719x_pci_probe(struct pci_dev *pdev, const struct pci_device_id *d)
if (err)
goto fail;
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
goto disable_device;
}
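Capturing the dma_set_mask() return value lets the probe path fail with the real errno instead of an invented one. The equivalent single-call form, shown only for comparison (this driver keeps dma_set_mask() here):

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_warn(&pdev->dev, "Unable to set 32-bit DMA mask\n");
		goto disable_device;
	}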
diff --git a/drivers/scsi/wd719x.h b/drivers/scsi/wd719x.h
index abaabd419a54..966ab0fb4621 100644
--- a/drivers/scsi/wd719x.h
+++ b/drivers/scsi/wd719x.h
@@ -56,6 +56,7 @@ struct wd719x_scb {
u8 flags[2]; /* 62-63 SCB specific flags (local to each thread) */
/* everything below is for driver use (not used by card) */
dma_addr_t phys; /* bus address of the SCB */
+ dma_addr_t dma_handle;
struct scsi_cmnd *cmd; /* a copy of the pointer we were passed */
struct list_head list;
struct wd719x_sglist sg_list[WD719X_SG] __aligned(8); /* SG list */
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 12c10a5e3d93..66b316d173b0 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -58,9 +58,6 @@
#include <asm/xen/hypervisor.h>
-
-#define GRANT_INVALID_REF 0
-
#define VSCSIFRONT_OP_ADD_LUN 1
#define VSCSIFRONT_OP_DEL_LUN 2
#define VSCSIFRONT_OP_READD_LUN 3
@@ -83,6 +80,8 @@ struct vscsifrnt_shadow {
uint16_t rqid;
uint16_t ref_rqid;
+ bool inflight;
+
unsigned int nr_grants; /* number of grants in gref[] */
struct scsiif_request_segment *sg; /* scatter/gather elements */
struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
@@ -104,7 +103,11 @@ struct vscsifrnt_info {
struct xenbus_device *dev;
struct Scsi_Host *host;
- int host_active;
+ enum {
+ STATE_INACTIVE,
+ STATE_ACTIVE,
+ STATE_ERROR
+ } host_active;
unsigned int evtchn;
unsigned int irq;
@@ -217,6 +220,8 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
ring_req->seg[i] = shadow->seg[i];
+ shadow->inflight = true;
+
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
if (notify)
notify_remote_via_irq(info->irq);
@@ -224,6 +229,13 @@ static int scsifront_do_request(struct vscsifrnt_info *info,
return 0;
}
+static void scsifront_set_error(struct vscsifrnt_info *info, const char *msg)
+{
+ shost_printk(KERN_ERR, info->host, KBUILD_MODNAME "%s\n"
+ "Disabling device for further use\n", msg);
+ info->host_active = STATE_ERROR;
+}
+
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
struct vscsifrnt_shadow *shadow)
{
@@ -233,17 +245,57 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
return;
for (i = 0; i < shadow->nr_grants; i++) {
- if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
- shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
- "grant still in use by backend\n");
- BUG();
+ if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
+ scsifront_set_error(info, "grant still in use by backend");
+ return;
}
- gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
}
kfree(shadow->sg);
}
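gnttab_try_end_foreign_access() refuses to revoke a grant the backend still has mapped. Rather than BUG(), the frontend now leaks the page and marks the host errored, since freeing a still-granted page would hand reusable memory to a hostile backend. The pattern in isolation:

	if (unlikely(!gnttab_try_end_foreign_access(gref))) {
		/* backend still maps the page: leak it, disable the device */
		scsifront_set_error(info, "grant still in use by backend");
		return;
	}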
+static unsigned int scsifront_host_byte(int32_t rslt)
+{
+ switch (XEN_VSCSIIF_RSLT_HOST(rslt)) {
+ case XEN_VSCSIIF_RSLT_HOST_OK:
+ return DID_OK;
+ case XEN_VSCSIIF_RSLT_HOST_NO_CONNECT:
+ return DID_NO_CONNECT;
+ case XEN_VSCSIIF_RSLT_HOST_BUS_BUSY:
+ return DID_BUS_BUSY;
+ case XEN_VSCSIIF_RSLT_HOST_TIME_OUT:
+ return DID_TIME_OUT;
+ case XEN_VSCSIIF_RSLT_HOST_BAD_TARGET:
+ return DID_BAD_TARGET;
+ case XEN_VSCSIIF_RSLT_HOST_ABORT:
+ return DID_ABORT;
+ case XEN_VSCSIIF_RSLT_HOST_PARITY:
+ return DID_PARITY;
+ case XEN_VSCSIIF_RSLT_HOST_ERROR:
+ return DID_ERROR;
+ case XEN_VSCSIIF_RSLT_HOST_RESET:
+ return DID_RESET;
+ case XEN_VSCSIIF_RSLT_HOST_BAD_INTR:
+ return DID_BAD_INTR;
+ case XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH:
+ return DID_PASSTHROUGH;
+ case XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR:
+ return DID_SOFT_ERROR;
+ case XEN_VSCSIIF_RSLT_HOST_IMM_RETRY:
+ return DID_IMM_RETRY;
+ case XEN_VSCSIIF_RSLT_HOST_REQUEUE:
+ return DID_REQUEUE;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED:
+ return DID_TRANSPORT_DISRUPTED;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST:
+ return DID_TRANSPORT_FAILFAST;
+ case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL:
+ return DID_TRANSPORT_MARGINAL;
+ default:
+ return DID_ERROR;
+ }
+}
+
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct vscsiif_response *ring_rsp)
{
@@ -251,7 +303,6 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
struct scsi_cmnd *sc;
uint32_t id;
uint8_t sense_len;
- int result;
id = ring_rsp->rqid;
shadow = info->shadow[id];
@@ -260,14 +311,12 @@ static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
BUG_ON(sc == NULL);
scsifront_gnttab_done(info, shadow);
+ if (info->host_active == STATE_ERROR)
+ return;
scsifront_put_rqid(info, id);
- result = ring_rsp->rslt;
- if (result >> 24)
- set_host_byte(sc, DID_ERROR);
- else
- set_host_byte(sc, host_byte(result));
- set_status_byte(sc, result & 0xff);
+ set_host_byte(sc, scsifront_host_byte(ring_rsp->rslt));
+ set_status_byte(sc, XEN_VSCSIIF_RSLT_STATUS(ring_rsp->rslt));
scsi_set_resid(sc, ring_rsp->residual_len);
sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
@@ -291,7 +340,10 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
shadow->wait_reset = 1;
switch (shadow->rslt_reset) {
case RSLT_RESET_WAITING:
- shadow->rslt_reset = ring_rsp->rslt;
+ if (ring_rsp->rslt == XEN_VSCSIIF_RSLT_RESET_SUCCESS)
+ shadow->rslt_reset = SUCCESS;
+ else
+ shadow->rslt_reset = FAILED;
break;
case RSLT_RESET_ERR:
kick = _scsifront_put_rqid(info, id);
@@ -301,9 +353,7 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
scsifront_wake_up(info);
return;
default:
- shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
- "bad reset state %d, possibly leaking %u\n",
- shadow->rslt_reset, id);
+ scsifront_set_error(info, "bad reset state");
break;
}
spin_unlock_irqrestore(&info->shadow_lock, flags);
@@ -314,28 +364,41 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
static void scsifront_do_response(struct vscsifrnt_info *info,
struct vscsiif_response *ring_rsp)
{
- if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
- test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
- "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
+ struct vscsifrnt_shadow *shadow;
+
+ if (ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
+ !info->shadow[ring_rsp->rqid]->inflight) {
+ scsifront_set_error(info, "illegal rqid returned by backend!");
return;
+ }
+ shadow = info->shadow[ring_rsp->rqid];
+ shadow->inflight = false;
- if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
+ if (shadow->act == VSCSIIF_ACT_SCSI_CDB)
scsifront_cdb_cmd_done(info, ring_rsp);
else
scsifront_sync_cmd_done(info, ring_rsp);
}
-static int scsifront_ring_drain(struct vscsifrnt_info *info)
+static int scsifront_ring_drain(struct vscsifrnt_info *info,
+ unsigned int *eoiflag)
{
- struct vscsiif_response *ring_rsp;
+ struct vscsiif_response ring_rsp;
RING_IDX i, rp;
int more_to_do = 0;
- rp = info->ring.sring->rsp_prod;
- rmb(); /* ordering required respective to dom0 */
+ rp = READ_ONCE(info->ring.sring->rsp_prod);
+ virt_rmb(); /* ordering required respective to backend */
+ if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) {
+ scsifront_set_error(info, "illegal number of responses");
+ return 0;
+ }
for (i = info->ring.rsp_cons; i != rp; i++) {
- ring_rsp = RING_GET_RESPONSE(&info->ring, i);
- scsifront_do_response(info, ring_rsp);
+ RING_COPY_RESPONSE(&info->ring, i, &ring_rsp);
+ scsifront_do_response(info, &ring_rsp);
+ if (info->host_active == STATE_ERROR)
+ return 0;
+ *eoiflag &= ~XEN_EOI_FLAG_SPURIOUS;
}
info->ring.rsp_cons = i;
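Responses are now copied out of the shared ring before being parsed, so the backend cannot rewrite a response between validation and use; READ_ONCE() plus virt_rmb() order the producer-index read against the payload reads. The consumption pattern in isolation (consume() is a hypothetical stand-in):

	struct vscsiif_response rsp;
	RING_IDX i, rp;

	rp = READ_ONCE(ring->sring->rsp_prod);
	virt_rmb();				/* index read before payload reads */
	for (i = ring->rsp_cons; i != rp; i++) {
		RING_COPY_RESPONSE(ring, i, &rsp);	/* private snapshot */
		consume(&rsp);
	}
	ring->rsp_cons = i;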
@@ -348,14 +411,15 @@ static int scsifront_ring_drain(struct vscsifrnt_info *info)
return more_to_do;
}
-static int scsifront_cmd_done(struct vscsifrnt_info *info)
+static int scsifront_cmd_done(struct vscsifrnt_info *info,
+ unsigned int *eoiflag)
{
int more_to_do;
unsigned long flags;
spin_lock_irqsave(info->host->host_lock, flags);
- more_to_do = scsifront_ring_drain(info);
+ more_to_do = scsifront_ring_drain(info, eoiflag);
info->wait_ring_available = 0;
@@ -369,20 +433,28 @@ static int scsifront_cmd_done(struct vscsifrnt_info *info)
static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
{
struct vscsifrnt_info *info = dev_id;
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
- while (scsifront_cmd_done(info))
+ if (info->host_active == STATE_ERROR) {
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ return IRQ_HANDLED;
+ }
+
+ while (scsifront_cmd_done(info, &eoiflag))
/* Yield point for this unbounded loop. */
cond_resched();
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
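With the event channel bound via bind_evtchn_to_irq_lateeoi() (further down), the EOI is sent explicitly once the handler has judged the interrupt; signalling XEN_EOI_FLAG_SPURIOUS whenever no response was consumed lets Xen throttle event storms from a misbehaving backend. The skeleton of the pattern (helper name hypothetical):

	static irqreturn_t demo_irq(int irq, void *dev_id)
	{
		unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

		if (consume_pending_responses(dev_id))	/* hypothetical */
			eoiflag = 0;			/* real work was found */
		xen_irq_lateeoi(irq, eoiflag);
		return IRQ_HANDLED;
	}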
static void scsifront_finish_all(struct vscsifrnt_info *info)
{
- unsigned i;
+ unsigned int i, dummy;
struct vscsiif_response resp;
- scsifront_ring_drain(info);
+ scsifront_ring_drain(info, &dummy);
for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
if (test_bit(i, info->shadow_free_bitmap))
@@ -539,6 +611,9 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
unsigned long flags;
int err;
+ if (info->host_active == STATE_ERROR)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
sc->result = 0;
shadow->sc = sc;
@@ -591,6 +666,9 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
int err = 0;
+ if (info->host_active == STATE_ERROR)
+ return FAILED;
+
shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
if (!shadow)
return FAILED;
@@ -662,6 +740,9 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
struct vscsifrnt_info *info = shost_priv(sdev->host);
int err;
+ if (info->host_active == STATE_ERROR)
+ return -EIO;
+
if (info && current == info->curr) {
err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
@@ -709,27 +790,15 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
struct xenbus_device *dev = info->dev;
struct vscsiif_sring *sring;
- grant_ref_t gref;
- int err = -ENOMEM;
+ int err;
/***** Frontend to Backend ring start *****/
- sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
- if (!sring) {
- xenbus_dev_fatal(dev, err,
- "fail to allocate shared ring (Front to Back)");
+ err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring, 1,
+ &info->ring_ref);
+ if (err)
return err;
- }
- SHARED_RING_INIT(sring);
- FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
- err = xenbus_grant_ring(dev, sring, 1, &gref);
- if (err < 0) {
- free_page((unsigned long)sring);
- xenbus_dev_fatal(dev, err,
- "fail to grant shared ring (Front to Back)");
- return err;
- }
- info->ring_ref = gref;
+ XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
err = xenbus_alloc_evtchn(dev, &info->evtchn);
if (err) {
@@ -737,7 +806,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
goto free_gnttab;
}
- err = bind_evtchn_to_irq(info->evtchn);
+ err = bind_evtchn_to_irq_lateeoi(info->evtchn);
if (err <= 0) {
xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
goto free_gnttab;
@@ -758,8 +827,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
free_irq:
unbind_from_irqhandler(info->irq, info);
free_gnttab:
- gnttab_end_foreign_access(info->ring_ref, 0,
- (unsigned long)info->ring.sring);
+ xenbus_teardown_ring((void **)&sring, 1, &info->ring_ref);
return err;
}
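xenbus_setup_ring() folds the page allocation, SHARED_RING_INIT() and the grant into a single call, and xenbus_teardown_ring() is its one inverse, which is why the error unwinding above collapses. The pairing in isolation, mirroring this hunk:

	err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring, 1,
				&info->ring_ref);
	if (err)
		return err;			/* nothing to unwind yet */
	XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	/* ...and on error or teardown: */
	xenbus_teardown_ring((void **)&sring, 1, &info->ring_ref);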
@@ -767,8 +835,7 @@ free_gnttab:
static void scsifront_free_ring(struct vscsifrnt_info *info)
{
unbind_from_irqhandler(info->irq, info);
- gnttab_end_foreign_access(info->ring_ref, 0,
- (unsigned long)info->ring.sring);
+ xenbus_teardown_ring((void **)&info->ring.sring, 1, &info->ring_ref);
}
static int scsifront_init_ring(struct vscsifrnt_info *info)
@@ -867,7 +934,7 @@ static int scsifront_probe(struct xenbus_device *dev,
goto free_sring;
}
info->host = host;
- info->host_active = 1;
+ info->host_active = STATE_ACTIVE;
xenbus_switch_state(dev, XenbusStateInitialised);
@@ -935,10 +1002,10 @@ static int scsifront_remove(struct xenbus_device *dev)
pr_debug("%s: %s removed\n", __func__, dev->nodename);
mutex_lock(&scsifront_mutex);
- if (info->host_active) {
+ if (info->host_active != STATE_INACTIVE) {
/* Scsi_host not yet removed */
scsi_remove_host(info->host);
- info->host_active = 0;
+ info->host_active = STATE_INACTIVE;
}
mutex_unlock(&scsifront_mutex);
@@ -962,9 +1029,9 @@ static void scsifront_disconnect(struct vscsifrnt_info *info)
*/
mutex_lock(&scsifront_mutex);
- if (info->host_active) {
+ if (info->host_active != STATE_INACTIVE) {
scsi_remove_host(host);
- info->host_active = 0;
+ info->host_active = STATE_INACTIVE;
}
mutex_unlock(&scsifront_mutex);
@@ -982,6 +1049,9 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
unsigned int hst, chn, tgt, lun;
struct scsi_device *sdev;
+ if (info->host_active == STATE_ERROR)
+ return;
+
dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
if (IS_ERR(dir))
return;
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index f1e5cf8a17d9..22d412cab91d 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -81,6 +81,7 @@ lasi_scsi_clock(void * hpa, int defaultclock)
static struct scsi_host_template zalon7xx_template = {
.module = THIS_MODULE,
.proc_name = "zalon7xx",
+ .cmd_size = sizeof(struct ncr_cmd_priv),
};
static int __init
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index 27b9e2baab1a..7acf9193a9e8 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
scsi_remove_host(host);
NCR_700_release(host);
+ if (host->base > 0x01000000)
+ iounmap(hostdata->base);
kfree(hostdata);
free_irq(host->irq, host);
zorro_release_device(z);
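The added iounmap() balances the mapping made at probe time: boards whose base lies above the 16 MB Zorro II space get their register window ioremap()ed, while Zorro II boards go through a static mapping that must not be unmapped. A simplified sketch; the probe-side form is an assumption, not shown in this diff:

	/* probe (assumed counterpart): */
	if (ioaddr > 0x01000000)
		hostdata->base = ioremap(ioaddr, 0x10000);	/* Zorro III window */

	/* remove, per this hunk: */
	if (host->base > 0x01000000)
		iounmap(hostdata->base);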